Example #1
  def testRealContentCanBeLoaded(self):
    """Tests that *something* from the real JSON files can be loaded."""
    # This directory is not available on swarming, so if it doesn't exist, just
    # skip the test.
    if not os.path.exists(builders.TESTING_BUILDBOT_DIR):
      return
    self.assertNotEqual(len(builders.GetCiBuilders('webgl_conformance')), 0)
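The silent return makes a skipped run indistinguishable from a pass. unittest's built-in skipTest reports the test as skipped instead; a minimal sketch, assuming the unexpected_passes import path:

import os
import unittest

from unexpected_passes import builders  # assumed import path


class BuildersTest(unittest.TestCase):
  def testRealContentCanBeLoaded(self):
    """Tests that *something* from the real JSON files can be loaded."""
    if not os.path.exists(builders.TESTING_BUILDBOT_DIR):
      # Mark the test as skipped rather than silently passing.
      self.skipTest('%s is not available on swarming' %
                    builders.TESTING_BUILDBOT_DIR)
    self.assertNotEqual(len(builders.GetCiBuilders('webgl_conformance')), 0)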
Example #2
def main():
    args = ParseArgs()
    # TODO(crbug.com/1108016): Remove this warning once ResultDB is enabled
    # on all builders and there is enough data for the results to be trusted.
    WarnUserOfIncompleteRollout()
    test_expectation_map = expectations.CreateTestExpectationMap(
        args.expectation_file, args.tests)
    ci_builders = builders.GetCiBuilders(
        SUITE_TO_TELEMETRY_SUITE_MAP.get(args.suite, args.suite))
    # Unmatched results are mainly useful for script maintainers, as they don't
    # provide any additional information for the purposes of finding unexpectedly
    # passing tests or unused expectations.
    unmatched = queries.FillExpectationMapForCiBuilders(
        test_expectation_map, ci_builders, args.suite, args.project,
        args.num_samples)
    try_builders = builders.GetTryBuilders(ci_builders)
    unmatched.update(
        queries.FillExpectationMapForTryBuilders(test_expectation_map,
                                                 try_builders, args.suite,
                                                 args.project,
                                                 args.num_samples))
    unused_expectations = expectations.FilterOutUnusedExpectations(
        test_expectation_map)
    result_output.OutputResults(test_expectation_map, unmatched,
                                unused_expectations, args.output_format)
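WarnUserOfIncompleteRollout is not shown in this snippet; based on the TODO above, a hypothetical stand-in could be as simple as:

def WarnUserOfIncompleteRollout():
    # Hypothetical sketch; the real warning text lives in the actual script.
    print('WARNING: ResultDB is not yet enabled on all builders '
          '(crbug.com/1108016), so the queried results may be incomplete '
          'and should not be fully trusted yet.')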
Example #3
def main():
    args = ParseArgs()
    test_expectation_map = expectations.CreateTestExpectationMap(
        args.expectation_file, args.tests)
    ci_builders = builders.GetCiBuilders(
        SUITE_TO_TELEMETRY_SUITE_MAP.get(args.suite, args.suite))

    querier = queries.BigQueryQuerier(args.suite, args.project,
                                      args.num_samples, args.large_query_mode)
    # Unmatched results are mainly useful for script maintainers, as they don't
    # provide any additional information for the purposes of finding unexpectedly
    # passing tests or unused expectations.
    unmatched = querier.FillExpectationMapForCiBuilders(
        test_expectation_map, ci_builders)
    try_builders = builders.GetTryBuilders(ci_builders)
    unmatched.update(
        querier.FillExpectationMapForTryBuilders(test_expectation_map,
                                                 try_builders))
    unused_expectations = expectations.FilterOutUnusedExpectations(
        test_expectation_map)
    stale, semi_stale, active = expectations.SplitExpectationsByStaleness(
        test_expectation_map)
    result_output.OutputResults(stale, semi_stale, active, unmatched,
                                unused_expectations, args.output_format)

    affected_urls = set()
    stale_message = ''
    if args.remove_stale_expectations:
        stale_expectations = []
        for _, expectation_map in stale.items():
            stale_expectations.extend(expectation_map.keys())
        stale_expectations.extend(unused_expectations)
        affected_urls |= expectations.RemoveExpectationsFromFile(
            stale_expectations, args.expectation_file)
        stale_message += (
            'Stale expectations removed from %s. Stale comments, '
            'etc. may still need to be removed.\n' % args.expectation_file)

    if args.modify_semi_stale_expectations:
        affected_urls |= expectations.ModifySemiStaleExpectations(
            semi_stale, args.expectation_file)
        stale_message += ('Semi-stale expectations modified in %s. Stale '
                          'comments, etc. may still need to be removed.\n' %
                          args.expectation_file)

    if stale_message:
        print(stale_message)
    if affected_urls:
        result_output.OutputAffectedUrls(affected_urls)
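main() leans entirely on ParseArgs for its configuration. A minimal argparse sketch covering the attributes used above; the flag spellings, defaults, and help strings are assumptions, and only the destination names come from the code:

import argparse


def ParseArgs():
    parser = argparse.ArgumentParser(
        description='Finds stale test expectations via recent CI/try results.')
    parser.add_argument('--expectation-file', required=True,
                        help='Path to the expectation file to check.')
    parser.add_argument('--test', action='append', dest='tests', default=[],
                        help='Restrict the check to specific tests.')
    parser.add_argument('--suite', default='webgl_conformance',
                        help='Suite whose results should be queried.')
    parser.add_argument('--project', help='Cloud project to run queries in.')
    parser.add_argument('--num-samples', type=int, default=100,
                        help='Recent builds to sample per builder.')
    parser.add_argument('--large-query-mode', action='store_true',
                        help='Split large queries into smaller ones.')
    parser.add_argument('--output-format', default='html',
                        help='Format passed to result_output.OutputResults.')
    parser.add_argument('--remove-stale-expectations', action='store_true',
                        help='Remove fully stale expectations from the file.')
    parser.add_argument('--modify-semi-stale-expectations', action='store_true',
                        help='Interactively modify semi-stale expectations.')
    return parser.parse_args()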
Example #4
    def testJsonContentLoaded(self):
        """Tests that the correct JSON data is loaded in."""
        self.setUpPyfakefs()
        gpu_json = {
            'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
            'Android Release (Nexus 5X)': {
                'isolated_scripts': [{
                    'args': [
                        'webgl_conformance',
                    ],
                    'isolate_name':
                    'telemetry_gpu_integration_test',
                }],
            },
            'GPU Linux Builder': {},
        }
        gpu_fyi_json = {
            'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
            'ANGLE GPU Android Release (Nexus 5X)': {
                'isolated_scripts': [{
                    'args': [
                        'webgl_conformance',
                    ],
                    'isolate_name':
                    'telemetry_gpu_integration_test',
                }],
            },
            'GPU FYI Linux Builder': {},
        }

        self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
                                     'chromium.gpu.json'),
                        contents=json.dumps(gpu_json))
        self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
                                     'chromium.gpu.fyi.json'),
                        contents=json.dumps(gpu_fyi_json))

        gpu_builders = builders.GetCiBuilders('webgl_conformance')
        self.assertEqual(
            gpu_builders,
            set([
                'Android Release (Nexus 5X)',
                'ANGLE GPU Android Release (Nexus 5X)'
            ]))
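This test relies on pyfakefs: setUpPyfakefs is pyfakefs' own fake_filesystem_unittest.TestCase API, while CreateFile is presumably a small project helper. A sketch of that helper as a thin wrapper over pyfakefs' create_file, which is an assumption:

from pyfakefs import fake_filesystem_unittest


class FakeFilesystemTestCase(fake_filesystem_unittest.TestCase):
    def CreateFile(self, file_path, contents=''):
        # Assumed helper: pyfakefs' create_file already creates any missing
        # parent directories in the fake filesystem.
        self.fs.create_file(file_path, contents=contents)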
Example #5
  def testFilterBySuite(self):
    """Tests that only builders that run the given suite are returned."""
    self.setUpPyfakefs()
    gpu_json = {
        'AAAAA1 AUTOGENERATED FILE DO NOT EDIT': {},
        'Android Tester': {
            'isolated_scripts': [
                {
                    'args': [
                        'webgl_conformance',
                    ],
                    'isolate_name': 'not_telemetry',
                },
            ],
        },
        'Linux Tester': {
            'isolated_scripts': [
                {
                    'args': [
                        'not_a_suite',
                    ],
                    'isolate_name': 'telemetry_gpu_integration_test',
                },
            ],
        },
        'Windows Tester': {
            'isolated_scripts': [
                {
                    'args': [
                        'webgl_conformance',
                    ],
                    'isolate_name': 'telemetry_gpu_integration_test',
                },
            ],
        },
    }

    self.CreateFile(os.path.join(builders.TESTING_BUILDBOT_DIR,
                                 'chromium.json'),
                    contents=json.dumps(gpu_json))

    gpu_builders = builders.GetCiBuilders('webgl_conformance')
    self.assertEqual(gpu_builders, set(['Windows Tester']))
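Taken together, these tests pin down the contract of GetCiBuilders: scan every waterfall JSON file under TESTING_BUILDBOT_DIR, ignore the autogenerated-file marker entry, and keep builders whose isolated_scripts run the Telemetry GPU isolate with the requested suite. A sketch consistent with that behavior, not the actual Chromium implementation (the constant's value is assumed):

import json
import os

TESTING_BUILDBOT_DIR = os.path.join('testing', 'buildbot')  # assumed value


def GetCiBuilders(suite):
  ci_builders = set()
  for json_file in os.listdir(TESTING_BUILDBOT_DIR):
    if not json_file.endswith('.json'):
      continue
    with open(os.path.join(TESTING_BUILDBOT_DIR, json_file)) as f:
      waterfall = json.load(f)
    for builder, test_map in waterfall.items():
      # Skip the 'AAAAA1 AUTOGENERATED FILE DO NOT EDIT' marker entry.
      if builder.startswith('AAAAA'):
        continue
      for script in test_map.get('isolated_scripts', []):
        if (script.get('isolate_name') == 'telemetry_gpu_integration_test'
            and suite in script.get('args', [])):
          ci_builders.add(builder)
  return ci_builders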
Example #6
def main():
    args = ParseArgs()
    # TODO(crbug.com/1108016): Remove this warning once ResultDB is enabled
    # on all builders and there is enough data for the results to be trusted.
    WarnUserOfIncompleteRollout()
    test_expectation_map = expectations.CreateTestExpectationMap(
        args.expectation_file, args.tests)
    ci_builders = builders.GetCiBuilders(
        SUITE_TO_TELEMETRY_SUITE_MAP.get(args.suite, args.suite))
    # Unmatched results are mainly useful for script maintainers, as they don't
    # provide any additional information for the purposes of finding unexpectedly
    # passing tests or unused expectations.
    unmatched = queries.FillExpectationMapForCiBuilders(
        test_expectation_map, ci_builders, args.suite, args.project,
        args.num_samples)
    try_builders = builders.GetTryBuilders(ci_builders)
    unmatched.update(
        queries.FillExpectationMapForTryBuilders(test_expectation_map,
                                                 try_builders, args.suite,
                                                 args.project,
                                                 args.num_samples))
    unused_expectations = expectations.FilterOutUnusedExpectations(
        test_expectation_map)
    stale, semi_stale, active = expectations.SplitExpectationsByStaleness(
        test_expectation_map)
    result_output.OutputResults(stale, semi_stale, active, unmatched,
                                unused_expectations, args.output_format)

    if args.remove_stale_expectations:
        stale_expectations = []
        for _, expectation_map in stale.items():
            stale_expectations.extend(expectation_map.keys())
        stale_expectations.extend(unused_expectations)
        removed_urls = expectations.RemoveExpectationsFromFile(
            stale_expectations, args.expectation_file)
        print(
            'Stale expectations removed from %s. Stale comments, etc. may still '
            'need to be removed.' % args.expectation_file)
        result_output.OutputRemovedUrls(removed_urls)
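Each of these main() variants assumes the usual module entry point; a hypothetical invocation is shown in the comment, with the script name being an assumption:

if __name__ == '__main__':
    # Hypothetical invocation; the real script name and flags may differ:
    #   ./check_stale_expectations.py --expectation-file expectations.txt \
    #       --suite webgl_conformance --remove-stale-expectations
    main()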