Example #1
    def testWebGlVersion(self):
        """Tests that only results for the correct WebGL version are returned."""
        query_results = [
            {
                'id':
                'build-1234',
                'test_id':
                ('ninja://chrome/test:telemetry_gpu_integration_test/'
                 'gpu_tests.webgl_conformance_integration_test.'
                 'WebGLConformanceIntegrationTest.test_name'),
                'status':
                'FAIL',
                'typ_expectations': [
                    'RetryOnFailure',
                ],
                'typ_tags': [
                    'webgl-version-1',
                ],
                'step_name':
                'step_name',
            },
            {
                'id':
                'build-2345',
                'test_id':
                ('ninja://chrome/test:telemetry_gpu_integration_test/'
                 'gpu_tests.webgl_conformance_integration_test.'
                 'WebGLConformanceIntegrationTest.test_name'),
                'status':
                'FAIL',
                'typ_expectations': [
                    'RetryOnFailure',
                ],
                'typ_tags': [
                    'webgl-version-2',
                ],
                'step_name':
                'step_name',
            },
        ]
        querier = unittest_utils.CreateGenericQuerier(
            suite='webgl_conformance1')
        self._popen_mock.return_value = unittest_utils.FakeProcess(
            stdout=json.dumps(query_results))
        results = querier.QueryBuilder('builder', 'ci')
        self.assertEqual(len(results), 1)
        self.assertEqual(
            results[0],
            data_types.Result('test_name', ['webgl-version-1'], 'Failure',
                              'step_name', '1234'))

        querier = unittest_utils.CreateGenericQuerier(
            suite='webgl_conformance2')
        results = querier.QueryBuilder('builder', 'ci')
        self.assertEqual(len(results), 1)
        self.assertEqual(
            results[0],
            data_types.Result('test_name', ['webgl-version-2'], 'Failure',
                              'step_name', '2345'))
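
Example #1 drives QueryBuilder through a mocked subprocess.Popen whose return value is a unittest_utils.FakeProcess. That helper is not shown in these examples; a minimal hypothetical version only needs to fake the pieces the query code reads:

class FakeProcess(object):
    """Hypothetical stand-in for a subprocess.Popen object.

    Only fakes what the query code is assumed to touch: communicate() and
    returncode. The real unittest_utils.FakeProcess may differ.
    """

    def __init__(self, returncode=0, stdout=None, stderr=None):
        self.returncode = returncode
        self._stdout = stdout or ''
        self._stderr = stderr or ''

    def communicate(self):
        return self._stdout, self._stderr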
Example #2
 def SideEffect(builder, *args):
   del args
   if builder == 'matched_builder':
     return [
         data_types.Result('foo', ['win'], 'Pass', 'step_name', 'build_id')
     ]
   else:
     return [data_types.Result('bar', [], 'Pass', 'step_name', 'build_id')]
Example #3
    def testRetryFailThenPassMatching(self):
        """Tests when there are pass and fail results for retry expectations."""
        foo_fail_result = data_types.Result('foo/test', ['win10'], 'Failure',
                                            'pixel_tests', 'build_id')
        foo_pass_result = data_types.Result('foo/test', ['win10'], 'Pass',
                                            'pixel_tests', 'build_id')
        expectation_map = self.GetEmptyMapForGenericRetryExpectation()
        unmatched_results = expectations.AddResultListToMap(
            expectation_map, 'builder', [foo_fail_result, foo_pass_result])
        self.assertEqual(unmatched_results, [])

        expected_expectation_map = self.GetFailedMapForExpectation(
            self.GetGenericRetryExpectation())
        self.assertEqual(expectation_map, expected_expectation_map)
Example #4
 def testResultMatchFailingExisting(self):
     """Test adding a failing result when results for a builder exist."""
     r = data_types.Result('some/test/case', ['win', 'win10'], 'Failure',
                           'pixel_tests', 'build_id')
     e = data_types.Expectation('some/test/*', ['win10'], 'Failure')
     stats = data_types.BuildStats()
     stats.AddPassedBuild()
     expectation_map = data_types.TestExpectationMap({
         'some/test/*':
         data_types.ExpectationBuilderMap({
             e:
             data_types.BuilderStepMap({
                 'builder':
                 data_types.StepBuildStatsMap({
                     'pixel_tests': stats,
                 }),
             }),
         }),
     })
     found_matching = expectations._AddResultToMap(r, 'builder',
                                                   expectation_map)
     self.assertTrue(found_matching)
     stats = data_types.BuildStats()
     stats.AddFailedBuild('build_id')
     stats.AddPassedBuild()
     expected_expectation_map = {
         'some/test/*': {
             e: {
                 'builder': {
                     'pixel_tests': stats,
                 },
             },
         },
     }
     self.assertEqual(expectation_map, expected_expectation_map)
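
Examples #4 and #5 nest four typed map classes (TestExpectationMap, ExpectationBuilderMap, BuilderStepMap, StepBuildStatsMap), and the assertions compare them directly against plain dicts, which implies dict subclasses. A minimal sketch of that structure; the real data_types classes presumably add key/value type checking on top:

class _TypedMap(dict):
    """Hypothetical base class: a plain dict whose subclasses exist only to
    give each level of the nesting a distinct name."""


class TestExpectationMap(_TypedMap):
    """Maps test name -> ExpectationBuilderMap."""


class ExpectationBuilderMap(_TypedMap):
    """Maps Expectation -> BuilderStepMap."""


class BuilderStepMap(_TypedMap):
    """Maps builder name -> StepBuildStatsMap."""


class StepBuildStatsMap(_TypedMap):
    """Maps step name -> BuildStats."""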
Example #5
 def testResultMatchMultiMatch(self):
     """Test adding a passing result when multiple expectations match."""
     r = data_types.Result('some/test/case', ['win', 'win10'], 'Pass',
                           'pixel_tests', 'build_id')
     e = data_types.Expectation('some/test/*', ['win10'], 'Failure')
     e2 = data_types.Expectation('some/test/case', ['win10'], 'Failure')
     expectation_map = data_types.TestExpectationMap({
         'some/test/*':
         data_types.ExpectationBuilderMap({
             e: data_types.BuilderStepMap(),
             e2: data_types.BuilderStepMap(),
         }),
     })
     found_matching = expectations._AddResultToMap(r, 'builder',
                                                   expectation_map)
     self.assertTrue(found_matching)
     stats = data_types.BuildStats()
     stats.AddPassedBuild()
     expected_expectation_map = {
         'some/test/*': {
             e: {
                 'builder': {
                     'pixel_tests': stats,
                 },
             },
             e2: {
                 'builder': {
                     'pixel_tests': stats,
                 },
             }
         }
     }
     self.assertEqual(expectation_map, expected_expectation_map)
Example #6
 def testValidResults(self):
   """Tests functionality when valid results are returned."""
   query_results = [
       {
           'id':
           'build-1234',
           'test_id': ('ninja://chrome/test:telemetry_gpu_integration_test/'
                       'gpu_tests.pixel_integration_test.'
                       'PixelIntegrationTest.test_name'),
           'status':
           'FAIL',
           'typ_expectations': [
               'RetryOnFailure',
           ],
           'typ_tags': [
               'win',
               'intel',
           ],
           'step_name':
           'step_name',
       },
   ]
   self._popen_mock.return_value = unittest_utils.FakeProcess(
       stdout=json.dumps(query_results))
   results = self._querier.QueryBuilder('builder', 'ci')
   self.assertEqual(len(results), 1)
   self.assertEqual(
       results[0],
       data_types.Result('test_name', ['win', 'intel'], 'Failure', 'step_name',
                         '1234'))
Example #7
    def testOutputResultsSmoketest(self):
        """Test that nothing blows up when outputting."""
        expectation_map = {
            'foo': {
                data_types.Expectation('foo', ['win', 'intel'], 'RetryOnFailure'):
                {
                    'stale': {
                        'all_pass': uu.CreateStatsWithPassFails(2, 0),
                    },
                },
                data_types.Expectation('foo', ['linux'], 'Failure'): {
                    'semi_stale': {
                        'all_pass': uu.CreateStatsWithPassFails(2, 0),
                        'some_pass': uu.CreateStatsWithPassFails(1, 1),
                        'none_pass': uu.CreateStatsWithPassFails(0, 2),
                    },
                },
                data_types.Expectation('foo', ['mac'], 'Failure'): {
                    'active': {
                        'none_pass': uu.CreateStatsWithPassFails(0, 2),
                    },
                },
            },
        }
        unmatched_results = {
            'builder': [
                data_types.Result('foo', ['win', 'intel'], 'Failure',
                                  'step_name', 'build_id'),
            ],
        }
        unmatched_expectations = [
            data_types.Expectation('foo', ['linux'], 'RetryOnFailure')
        ]

        stale, semi_stale, active = expectations.SplitExpectationsByStaleness(
            expectation_map)

        result_output.OutputResults(stale, semi_stale, active, {}, [], 'print',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active,
                                    unmatched_results, [], 'print',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active, {},
                                    unmatched_expectations, 'print',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active,
                                    unmatched_results, unmatched_expectations,
                                    'print', self._file_handle)

        result_output.OutputResults(stale, semi_stale, active, {}, [], 'html',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active,
                                    unmatched_results, [], 'html',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active, {},
                                    unmatched_expectations, 'html',
                                    self._file_handle)
        result_output.OutputResults(stale, semi_stale, active,
                                    unmatched_results, unmatched_expectations,
                                    'html', self._file_handle)
Example #8
 def testResultMatchPassingNew(self):
     """Test adding a passing result when no results for a builder exist."""
     r = data_types.Result('some/test/case', ['win', 'win10'], 'Pass',
                           'pixel_tests', 'build_id')
     e = data_types.Expectation('some/test/*', ['win10'], 'Failure')
     expectation_map = {
         'some/test/*': {
             e: {},
         },
     }
     found_matching = expectations._AddResultToMap(r, 'builder',
                                                   expectation_map)
     self.assertTrue(found_matching)
     stats = data_types.BuildStats()
     stats.AddPassedBuild()
     expected_expectation_map = {
         'some/test/*': {
             e: {
                 'builder': {
                     'pixel_tests': stats,
                 },
             },
         },
     }
     self.assertEqual(expectation_map, expected_expectation_map)
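
These tests exercise BuildStats only through AddPassedBuild, AddFailedBuild, and equality. A hypothetical minimal implementation consistent with that usage (the failure-link format is an assumption based on the ci.chromium.org URLs in the unmatched-result tests in later examples):

class BuildStats(object):
    """Hypothetical minimal version of data_types.BuildStats."""

    def __init__(self):
        self.passed_builds = 0
        self.total_builds = 0
        # Assumed: failed builds are remembered as clickable build links.
        self.failure_links = frozenset()

    def AddPassedBuild(self):
        self.passed_builds += 1
        self.total_builds += 1

    def AddFailedBuild(self, build_id):
        self.total_builds += 1
        link = 'http://ci.chromium.org/b/%s' % build_id
        self.failure_links = self.failure_links | {link}

    def __eq__(self, other):
        return (isinstance(other, BuildStats)
                and self.passed_builds == other.passed_builds
                and self.total_builds == other.total_builds
                and self.failure_links == other.failure_links)

    def __ne__(self, other):
        return not self.__eq__(other)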
Example #9
    def QueryBuilder(self, builder, builder_type):
        """Queries ResultDB for results from |builder|.

    Args:
      builder: A string containing the name of the builder to query.
      builder_type: A string containing the type of builder to query, either
          "ci" or "try".

    Returns:
      The results returned by the query converted into a list of
      data_types.Resultobjects.
    """

        test_filter_clause = self._GetTestFilterClauseForBuilder(
            builder, builder_type)
        if test_filter_clause is None:
            # No affected tests on this builder, so early return.
            return []

        query = GPU_BQ_QUERY_TEMPLATE.format(
            builder_type=builder_type,
            test_filter_clause=test_filter_clause,
            suite=self._suite)

        query_results = self._RunBigQueryCommandForJsonOutput(
            query, {
                '': {
                    'builder_name': builder
                },
                'INT64': {
                    'num_builds': self._num_samples
                }
            })
        results = []
        if not query_results:
            # Don't bother logging if we know this is a fake CI builder.
            if not (builder_type == 'ci'
                    and builder in builders_module.FAKE_CI_BUILDERS):
                logging.warning(
                    'Did not get results for "%s", but this may be because its results '
                    'do not apply to any expectations for this suite.',
                    builder)
            return results

        for r in query_results:
            if not self._check_webgl_version(r['typ_tags']):
                continue
            build_id = _StripPrefixFromBuildId(r['id'])
            test_name = _StripPrefixFromTestId(r['test_id'])
            actual_result = _ConvertActualResultToExpectationFileFormat(
                r['status'])
            tags = r['typ_tags']
            step = r['step_name']
            results.append(
                data_types.Result(test_name, tags, actual_result, step,
                                  build_id))
        logging.debug('Got %d results for %s builder %s', len(results),
                      builder_type, builder)
        return results
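
QueryBuilder depends on _StripPrefixFromBuildId and _StripPrefixFromTestId, which are not shown. Judging from the test data in Examples #1 and #6 ('build-1234' becomes '1234' and only the trailing test name survives), plausible sketches look like this; the prefix constant is hypothetical, pinned here to the pixel suite from Example #6:

def _StripPrefixFromBuildId(build_id):
    # Build IDs come back from ResultDB as 'build-<id>'; expectations only
    # want the trailing part.
    assert build_id.startswith('build-')
    return build_id[len('build-'):]


# Hypothetical constant: the real code presumably derives this prefix from
# the suite being queried rather than hardcoding a single suite's prefix.
_TEST_ID_PREFIX = ('ninja://chrome/test:telemetry_gpu_integration_test/'
                   'gpu_tests.pixel_integration_test.'
                   'PixelIntegrationTest.')


def _StripPrefixFromTestId(test_id):
    assert test_id.startswith(_TEST_ID_PREFIX)
    return test_id[len(_TEST_ID_PREFIX):]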
Example #10
 def testEquality(self):
     r = GENERIC_RESULT
     other = data_types.Result('test', ['tag1', 'tag2'], 'Pass',
                               'pixel_tests', 'build_id')
     self.assertEqual(r, other)
     other = data_types.Result('test2', ['tag1', 'tag2'], 'Pass',
                               'pixel_tests', 'build_id')
     self.assertNotEqual(r, other)
     other = data_types.Result('test', ['tag1'], 'Pass', 'pixel_tests',
                               'build_id')
     self.assertNotEqual(r, other)
     other = data_types.Result('test', ['tag1', 'tag2'], 'Failure',
                               'pixel_tests', 'build_id')
     self.assertNotEqual(r, other)
     other = data_types.Result('test', ['tag1', 'tag2'], 'Pass',
                               'webgl_tests', 'build_id')
     self.assertNotEqual(r, other)
     other = data_types.Result('test', ['tag1', 'tag2'], 'Pass',
                               'pixel_tests', 'other_build_id')
     self.assertNotEqual(r, other)
     other = data_types.Expectation('test', ['tag1', 'tag2'], 'Pass')
     self.assertNotEqual(r, other)
Example #11
 def testAppliesToResultApplies(self):
   r = data_types.Result('test', ['tag1', 'tag2'], 'Pass', 'pixel_tests',
                         'build_id')
   # Exact name match, exact tag match.
   e = GENERIC_EXPECTATION
   self.assertTrue(e.AppliesToResult(r))
   # Glob name match, exact tag match.
   e = data_types.Expectation('te*', ['tag1', 'tag2'], 'Pass')
   self.assertTrue(e.AppliesToResult(r))
   # Exact name match, tag subset match.
   e = data_types.Expectation('test', ['tag1'], 'Pass')
   self.assertTrue(e.AppliesToResult(r))
   # Expected result subset match.
   r = data_types.Result('test', ['tag1', 'tag2'], 'Pass', 'pixel_tests',
                         'build_id')
   e = GENERIC_EXPECTATION
   self.assertTrue(e.AppliesToResult(r))
   e = data_types.Expectation('test', ['tag1', 'tag2'], ['RetryOnFailure'])
   self.assertTrue(e.AppliesToResult(r))
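
Together with the negative cases in Example #14, this pins down AppliesToResult's name and tag semantics: glob matching on the test name plus a subset check on the tags. A standalone sketch of those two checks, assuming fnmatch-style globbing (the real implementation may differ in details):

import fnmatch


def _applies_to_result(expectation_test, expectation_tags, result_test,
                       result_tags):
    """Hypothetical reimplementation of the checks the tests above cover."""
    # Name check: 'te*' matches 'test', but 'ta*' and the exact name 'te'
    # do not.
    if not fnmatch.fnmatch(result_test, expectation_test):
        return False
    # Tag check: every expectation tag must appear in the result's tags.
    return set(expectation_tags) <= set(result_tags)


assert _applies_to_result('te*', ['tag1', 'tag2'], 'test', ['tag1', 'tag2'])
assert _applies_to_result('test', ['tag1'], 'test', ['tag1', 'tag2'])
assert not _applies_to_result('test', ['tag3'], 'test', ['tag1', 'tag2'])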
Example #12
 def testResultNoMatch(self):
   """Tests that a result is not added if no match is found."""
   r = data_types.Result('some/test/case', ['win', 'win10'], 'Failure',
                         'pixel_tests', 'build_id')
   e = data_types.Expectation('some/test/*', ['win10', 'foo'], 'Failure')
   expectation_map = {'some/test/*': {e: {}}}
   found_matching = queries._AddResultToMap(r, 'builder', expectation_map)
   self.assertFalse(found_matching)
   expected_expectation_map = {'some/test/*': {e: {}}}
   self.assertEqual(expectation_map, expected_expectation_map)
Example #13
    def testMismatches(self):
        """Tests that unmatched results get returned."""
        foo_match_result = data_types.Result('foo/test', ['win10'], 'Pass',
                                             'pixel_tests', 'build_id')
        foo_mismatch_result = data_types.Result('foo/not_a_test', ['win10'],
                                                'Failure', 'pixel_tests',
                                                'build_id')
        bar_result = data_types.Result('bar/test', ['win10'], 'Pass',
                                       'pixel_tests', 'build_id')
        expectation_map = self.GetEmptyMapForGenericFailureExpectation()
        unmatched_results = expectations.AddResultListToMap(
            expectation_map, 'builder',
            [foo_match_result, foo_mismatch_result, bar_result])
        self.assertEqual(len(set(unmatched_results)), 2)
        self.assertEqual(set(unmatched_results),
                         set([foo_mismatch_result, bar_result]))

        expected_expectation_map = self.GetPassedMapForExpectation(
            self.GetGenericFailureExpectation())
        self.assertEqual(expectation_map, expected_expectation_map)
Example #14
 def testAppliesToResultDoesNotApply(self):
     r = data_types.Result('test', ['tag1', 'tag2'], 'Pass', 'pixel_tests',
                           'build_id')
     # Exact name mismatch.
     e = data_types.Expectation('te', ['tag1', 'tag2'], 'Pass')
     self.assertFalse(e.AppliesToResult(r))
     # Glob name mismatch.
     e = data_types.Expectation('ta*', ['tag1', 'tag2'], 'Pass')
     self.assertFalse(e.AppliesToResult(r))
     # Tags subset mismatch.
     e = data_types.Expectation('test', ['tag3'], 'Pass')
     self.assertFalse(e.AppliesToResult(r))
Example #15
    def testFailureFailureMatching(self):
        """Tests when there are failure results for failure expectations."""
        foo_result = data_types.Result('foo/test', ['win10'], 'Failure',
                                       'pixel_tests', 'build_id')
        expectation_map = self.GetEmptyMapForGenericFailureExpectation()
        unmatched_results = expectations.AddResultListToMap(
            expectation_map, 'builder', [foo_result])
        self.assertEqual(unmatched_results, [])

        expected_expectation_map = self.GetFailedMapForExpectation(
            self.GetGenericFailureExpectation())
        self.assertEqual(expectation_map, expected_expectation_map)
Example #16
    def testRetryOnlyFailMatching(self):
        """Tests when the only tests are retry expectations that fail and match."""
        foo_result = data_types.Result('foo/test', ['win10'], 'Failure',
                                       'pixel_tests', 'build_id')
        expectation_map = self.GetEmptyMapForGenericRetryExpectation()
        unmatched_results = expectations.AddResultListToMap(
            expectation_map, 'builder', [foo_result])
        self.assertEqual(unmatched_results, [])

        expected_expectation_map = self.GetFailedMapForExpectation(
            self.GetGenericRetryExpectation())
        self.assertEqual(expectation_map, expected_expectation_map)
Example #17
 def testEquality(self):
     e = GENERIC_EXPECTATION
     other = data_types.Expectation('test', ['tag1', 'tag2'], 'Pass')
     self.assertEqual(e, other)
     other = data_types.Expectation('test2', ['tag1', 'tag2'], 'Pass')
     self.assertNotEqual(e, other)
     other = data_types.Expectation('test', ['tag1'], 'Pass')
     self.assertNotEqual(e, other)
     other = data_types.Expectation('test', ['tag1', 'tag2'], 'Failure')
     self.assertNotEqual(e, other)
     other = data_types.Result('test', ['tag1', 'tag2'], 'Pass',
                               'pixel_tests', 'build_id')
     self.assertNotEqual(e, other)
Example #18
    def testValidResults(self):
        """Tests functionality when valid results are returned by the query."""
        def SideEffect(builder, *args):
            del args
            if builder == 'matched_builder':
                return [
                    data_types.Result('foo', ['win'], 'Pass', 'step_name',
                                      'build_id')
                ]
            else:
                return [
                    data_types.Result('bar', [], 'Pass', 'step_name',
                                      'build_id')
                ]

        self._query_mock.side_effect = SideEffect

        expectation = data_types.Expectation('foo', ['win'], 'RetryOnFailure')
        expectation_map = data_types.TestExpectationMap({
            'foo':
            data_types.ExpectationBuilderMap({
                expectation:
                data_types.BuilderStepMap(),
            }),
        })
        unmatched_results = self._querier._FillExpectationMapForBuilders(
            expectation_map, ['matched_builder', 'unmatched_builder'], 'ci')
        stats = data_types.BuildStats()
        stats.AddPassedBuild()
        expected_expectation_map = {
            'foo': {
                expectation: {
                    'ci:matched_builder': {
                        'step_name': stats,
                    },
                },
            },
        }
        self.assertEqual(expectation_map, expected_expectation_map)
        self.assertEqual(
            unmatched_results, {
                'ci:unmatched_builder': [
                    data_types.Result('bar', [], 'Pass', 'step_name',
                                      'build_id'),
                ],
            })
Example #19
 def testMinimalData(self):
   """Tests that everything functions when minimal data is provided."""
   unmatched_results = {
       'builder': [
           data_types.Result('foo', [], 'Failure', None, 'build_id'),
       ],
   }
   expected_output = {
       'foo': {
           'builder': {
               None: [
                   'Got "Failure" on http://ci.chromium.org/b/build_id with '
                   'tags []',
               ],
           },
       },
   }
   output = result_output._ConvertUnmatchedResultsToStringDict(
       unmatched_results)
   self.assertEqual(output, expected_output)
Example #20
 def testRegularData(self):
   """Tests that everything functions when regular data is provided."""
   unmatched_results = {
       'builder': [
           data_types.Result('foo', ['win', 'intel'], 'Failure', 'step_name',
                             'build_id')
       ],
   }
   expected_output = {
       'foo': {
           'builder': {
               'step_name': [
                   'Got "Failure" on http://ci.chromium.org/b/build_id with '
                   'tags [win intel]',
               ]
           }
       }
   }
   output = result_output._ConvertUnmatchedResultsToStringDict(
       unmatched_results)
   self.assertEqual(output, expected_output)
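
Examples #19 and #20 jointly pin down the shape _ConvertUnmatchedResultsToStringDict produces: {test: {builder: {step: [message strings]}}}. A rough sketch that satisfies both tests, offered as an assumption about the real implementation:

def _ConvertUnmatchedResultsToStringDict(unmatched_results):
    """Converts {builder: [Result]} into nested dicts of message strings."""
    output = {}
    for builder, results in unmatched_results.items():
        for r in results:
            message = ('Got "%s" on http://ci.chromium.org/b/%s with tags '
                       '[%s]' % (r.actual_result, r.build_id,
                                 ' '.join(r.tags)))
            output.setdefault(r.test, {}).setdefault(builder, {}).setdefault(
                r.step, []).append(message)
    return output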
Example #21
def AddResultListToMap(expectation_map, builder, results):
    """Adds |results| to |expectation_map|.

  Args:
    expectation_map: A data_types.TestExpectationMap. Will be modified in-place.
    builder: A string containing the builder |results| came from. Should be
        prefixed with something to distinguish between identically named CI and
        try builders.
    results: A list of data_types.Result objects corresponding to the ResultDB
        data queried for |builder|.

  Returns:
    A list of data_types.Result objects who did not have a matching expectation
    in |expectation_map|.
  """
    assert isinstance(expectation_map, data_types.TestExpectationMap)
    failure_results = set()
    pass_results = set()
    unmatched_results = []
    for r in results:
        if r.actual_result == 'Pass':
            pass_results.add(r)
        else:
            failure_results.add(r)

    # Remove any cases of failure -> pass from the passing set. If a test is
    # flaky, we get both pass and failure results for it, so we need to remove
    # any pass result that has a corresponding failure result.
    modified_failing_retry_results = set()
    for r in failure_results:
        modified_failing_retry_results.add(
            data_types.Result(r.test, r.tags, 'Pass', r.step, r.build_id))
    pass_results -= modified_failing_retry_results

    for r in pass_results | failure_results:
        found_matching = _AddResultToMap(r, builder, expectation_map)
        if not found_matching:
            unmatched_results.append(r)

    return unmatched_results
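
The failure -> pass subtraction is the subtle step in AddResultListToMap. A standalone sketch of just that step, with a namedtuple standing in for data_types.Result (the real class must be hashable for the set arithmetic above to work):

import collections

# Hypothetical stand-in for data_types.Result.
Result = collections.namedtuple(
    'Result', ['test', 'tags', 'actual_result', 'step', 'build_id'])

fail = Result('foo/test', ('win10',), 'Failure', 'pixel_tests', 'build-1')
flaky_pass = Result('foo/test', ('win10',), 'Pass', 'pixel_tests', 'build-1')
clean_pass = Result('bar/test', ('win10',), 'Pass', 'pixel_tests', 'build-1')

pass_results = {flaky_pass, clean_pass}
failure_results = {fail}

# Same subtraction as above: a pass that shares a failure's
# test/tags/step/build_id is a retry of that failure, not a clean pass.
retried_passes = {
    Result(r.test, r.tags, 'Pass', r.step, r.build_id)
    for r in failure_results
}
pass_results -= retried_passes

assert pass_results == {clean_pass}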
Example #22
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unittest

import mock

from unexpected_passes import data_types

GENERIC_EXPECTATION = data_types.Expectation('test', ['tag1', 'tag2'],
                                             ['Pass'])
GENERIC_RESULT = data_types.Result('test', ['tag1', 'tag2'], 'Pass',
                                   'pixel_tests', 'build_id')


class ExpectationUnittest(unittest.TestCase):
    def testEquality(self):
        e = GENERIC_EXPECTATION
        other = data_types.Expectation('test', ['tag1', 'tag2'], 'Pass')
        self.assertEqual(e, other)
        other = data_types.Expectation('test2', ['tag1', 'tag2'], 'Pass')
        self.assertNotEqual(e, other)
        other = data_types.Expectation('test', ['tag1'], 'Pass')
        self.assertNotEqual(e, other)
        other = data_types.Expectation('test', ['tag1', 'tag2'], 'Failure')
        self.assertNotEqual(e, other)
        other = data_types.Result('test', ['tag1', 'tag2'], 'Pass',
                                  'pixel_tests', 'build_id')
        self.assertNotEqual(e, other)
Example #23
    def QueryBuilder(self, builder, builder_type):
        """Queries ResultDB for results from |builder|.

    Args:
      builder: A string containing the name of the builder to query.
      builder_type: A string containing the type of builder to query, either
          "ci" or "try".

    Returns:
      The results returned by the query converted into a list of
      data_types.Resultobjects.
    """

        test_filter = self._GetTestFilterForBuilder(builder, builder_type)
        if not test_filter:
            # No affected tests on this builder, so early return.
            return []

        # Query for the test data from the builder, splitting the query if we run
        # into the BigQuery hard memory limit. Even if we keep failing, this will
        # eventually stop due to getting a QuerySplitError when we can't split the
        # query any further.
        query_results = None
        while query_results is None:
            try:
                queries = []
                for tfc in test_filter.GetClauses():
                    query = GPU_BQ_QUERY_TEMPLATE.format(
                        builder_type=builder_type,
                        test_filter_clause=tfc,
                        suite=self._suite)
                    queries.append(query)

                query_results = self._RunBigQueryCommandsForJsonOutput(
                    queries, {
                        '': {
                            'builder_name': builder
                        },
                        'INT64': {
                            'num_builds': self._num_samples
                        }
                    })
            except MemoryLimitError:
                logging.warning(
                    'Query to builder %s hit BigQuery hard memory limit, trying again '
                    'with more query splitting.', builder)
                test_filter.SplitFilter()

        results = []
        if not query_results:
            # Don't bother logging if we know this is a fake CI builder.
            if not (builder_type == 'ci'
                    and builder in builders_module.FAKE_CI_BUILDERS):
                logging.warning(
                    'Did not get results for "%s", but this may be because its '
                    'results do not apply to any expectations for this suite.',
                    builder)
            return results

        for r in query_results:
            if not self._check_webgl_version(r['typ_tags']):
                continue
            build_id = _StripPrefixFromBuildId(r['id'])
            test_name = _StripPrefixFromTestId(r['test_id'])
            actual_result = _ConvertActualResultToExpectationFileFormat(
                r['status'])
            tags = r['typ_tags']
            step = r['step_name']
            results.append(
                data_types.Result(test_name, tags, actual_result, step,
                                  build_id))
        logging.debug('Got %d results for %s builder %s', len(results),
                      builder_type, builder)
        return results
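
The retry loop in this version of QueryBuilder leans on a test_filter object exposing GetClauses() and SplitFilter(), and on a QuerySplitError eventually ending the loop; none of those are shown here. A hypothetical minimal filter that halves its clause chunks on every split:

class QuerySplitError(Exception):
    """Assumed: raised when a filter cannot be split any further."""


class ListTestFilter(object):
    """Hypothetical filter over an explicit list of test IDs."""

    def __init__(self, test_ids):
        self._chunks = [list(test_ids)]

    def GetClauses(self):
        # One WHERE-clause fragment per chunk; each fragment becomes its own
        # query in the loop above.
        return [
            'AND test_id IN UNNEST([%s])' % ', '.join(
                '"%s"' % t for t in chunk) for chunk in self._chunks
        ]

    def SplitFilter(self):
        new_chunks = []
        for chunk in self._chunks:
            if len(chunk) == 1:
                raise QuerySplitError('Cannot split chunk with a single test')
            mid = len(chunk) // 2
            new_chunks.append(chunk[:mid])
            new_chunks.append(chunk[mid:])
        self._chunks = new_chunks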
Example #24
 def testWildcardsDisallowed(self):
     with self.assertRaises(AssertionError):
         data_types.Result('*', ['tag1'], 'Pass', 'pixel_tests', 'build_id')
Example #25
def QueryBuilder(builder, builder_type, suite, project, num_samples):
    """Queries ResultDB for results from |builder|.

  Args:
    builder: A string containing the name of the builder to query.
    builder_type: A string containing the type of builder to query, either "ci"
        or "try".
    suite: A string containing the name of the suite that is being queried for.
    project: A string containing the billing project to use for BigQuery.
    num_samples: An integer containing the number of builds to pull results
        from.

  Returns:
    A tuple (builder, results). |builder| is simply the value of the input
    |builder| argument, returned to facilitate parallel execution. |results| is
    the results returned by the query converted into a list of data_types.Result
    objects.
  """
    num_samples = num_samples or DEFAULT_NUM_SAMPLES
    assert num_samples > 0

    # WebGL 1 and 2 tests are technically the same suite, but have different
    # expectation files. This leads to us getting both WebGL 1 and 2 results when
    # we only have expectations for one of them, which causes all the results from
    # the other to be reported as not having a matching expectation.
    # TODO(crbug.com/1140283): Remove this once WebGL expectations are merged
    # and there's no need to differentiate them.
    if 'webgl_conformance' in suite:
        webgl_version = suite[-1]
        suite = 'webgl_conformance'
        check_webgl_version = (
            lambda tags: 'webgl-version-%s' % webgl_version in tags)
    else:
        check_webgl_version = lambda tags: True

    # Most test names are |suite|_integration_test, but there are several that
    # are not reported that way in typ, and by extension ResultDB, so adjust that
    # here.
    suite = TELEMETRY_SUITE_TO_RDB_SUITE_EXCEPTION_MAP.get(
        suite, suite + '_integration_test')

    query = GPU_BQ_QUERY_TEMPLATE.format(builder_type=builder_type,
                                         suite=suite)
    cmd = [
        'bq',
        'query',
        '--max_rows=%d' % MAX_ROWS,
        '--format=json',
        '--project_id=%s' % project,
        '--use_legacy_sql=false',
        '--parameter=builder_name::%s' % builder,
        '--parameter=num_builds:INT64:%d' % num_samples,
        query,
    ]
    with open(os.devnull, 'w') as devnull:
        try:
            stdout = subprocess.check_output(cmd, stderr=devnull)
        except subprocess.CalledProcessError as e:
            logging.error(e.output)
            raise

    query_results = json.loads(stdout)
    results = []
    if not query_results:
        # Don't bother logging if we know this is a fake CI builder.
        if not (builder_type == 'ci'
                and builder in builders_module.FAKE_CI_BUILDERS):
            logging.warning(
                'Did not get results for "%s", but this may be because its results '
                'do not apply to any expectations for this suite.', builder)
        return (builder, results)

    for r in query_results:
        if not check_webgl_version(r['typ_tags']):
            continue
        build_id = _StripPrefixFromBuildId(r['id'])
        test_name = _StripPrefixFromTestId(r['test_id'])
        actual_result = _ConvertActualResultToExpectationFileFormat(
            r['status'])
        tags = r['typ_tags']
        step = r['step_name']
        results.append(
            data_types.Result(test_name, tags, actual_result, step, build_id))
    logging.debug('Got %d results for %s builder %s', len(results),
                  builder_type, builder)
    return (builder, results)
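
The docstring notes that the (builder, results) tuple exists to facilitate parallel execution. A sketch of how a caller might fan queries out across builders with a thread pool; this wiring is illustrative, not the tool's actual driver code:

import multiprocessing.pool


def _QueryAllBuilders(builders, builder_type, suite, project, num_samples):
    # Completion order is arbitrary under a pool, so the (builder, results)
    # tuples are what let us reassemble a per-builder dict afterwards.
    pool = multiprocessing.pool.ThreadPool(max(1, min(len(builders), 8)))
    try:
        tuples = pool.map(
            lambda b: QueryBuilder(b, builder_type, suite, project,
                                   num_samples), builders)
    finally:
        pool.close()
        pool.join()
    return dict(tuples)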
Example #26
def QueryBuilder(builder, builder_type, suite, project, num_samples,
                 large_query_mode):
    """Queries ResultDB for results from |builder|.

  Args:
    builder: A string containing the name of the builder to query.
    builder_type: A string containing the type of builder to query, either "ci"
        or "try".
    suite: A string containing the name of the suite that is being queried for.
    project: A string containing the billing project to use for BigQuery.
    num_samples: An integer containing the number of builds to pull results
        from.
    large_query_mode: A boolean indicating whether large query mode should be
        used. In this mode, an initial, smaller query is made and its results
        are used to perform additional filtering on a second, larger query in
        BigQuery. This works around hitting a hard memory limit when running the
        ORDER BY clause.

  Returns:
    A tuple (builder, results). |builder| is simply the value of the input
    |builder| argument, returned to facilitate parallel execution. |results| is
    the results returned by the query converted into a list of data_types.Result
    objects.
  """
    num_samples = num_samples or DEFAULT_NUM_SAMPLES
    assert num_samples > 0

    # WebGL 1 and 2 tests are technically the same suite, but have different
    # expectation files. This leads to us getting both WebGL 1 and 2 results when
    # we only have expectations for one of them, which causes all the results from
    # the other to be reported as not having a matching expectation.
    # TODO(crbug.com/1140283): Remove this once WebGL expectations are merged
    # and there's no need to differentiate them.
    if 'webgl_conformance' in suite:
        webgl_version = suite[-1]
        suite = 'webgl_conformance'
        check_webgl_version = (
            lambda tags: 'webgl-version-%s' % webgl_version in tags)
    else:
        check_webgl_version = lambda tags: True

    # Most test names are |suite|_integration_test, but there are several that
    # are not reported that way in typ, and by extension ResultDB, so adjust that
    # here.
    suite = TELEMETRY_SUITE_TO_RDB_SUITE_EXCEPTION_MAP.get(
        suite, suite + '_integration_test')

    test_filter_clause = _GetTestFilterClauseForBuilder(
        builder, builder_type, suite, project, large_query_mode)
    if test_filter_clause is None:
        # No affected tests on this builder, so early return.
        return (builder, [])

    query = GPU_BQ_QUERY_TEMPLATE.format(builder_type=builder_type,
                                         test_filter_clause=test_filter_clause,
                                         suite=suite)

    query_results = _RunBigQueryCommandForJsonOutput(query, project, {
        '': {
            'builder_name': builder
        },
        'INT64': {
            'num_builds': num_samples
        }
    })
    results = []
    if not query_results:
        # Don't bother logging if we know this is a fake CI builder.
        if not (builder_type == 'ci'
                and builder in builders_module.FAKE_CI_BUILDERS):
            logging.warning(
                'Did not get results for "%s", but this may be because its results '
                'do not apply to any expectations for this suite.', builder)
        return (builder, results)

    for r in query_results:
        if not check_webgl_version(r['typ_tags']):
            continue
        build_id = _StripPrefixFromBuildId(r['id'])
        test_name = _StripPrefixFromTestId(r['test_id'])
        actual_result = _ConvertActualResultToExpectationFileFormat(
            r['status'])
        tags = r['typ_tags']
        step = r['step_name']
        results.append(
            data_types.Result(test_name, tags, actual_result, step, build_id))
    logging.debug('Got %d results for %s builder %s', len(results),
                  builder_type, builder)
    return (builder, results)
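
Read together, Examples #25 and #26 imply how the parameter dict passed to _RunBigQueryCommandForJsonOutput maps onto bq flags: the outer key is the BigQuery type (empty meaning STRING) and the inner dict holds name/value pairs, mirroring the literal '--parameter=builder_name::%s' and '--parameter=num_builds:INT64:%d' flags in Example #25. A sketch of that conversion, as an assumption about what the helper does before invoking bq:

def _GenerateBigQueryParameterFlags(parameters):
    """Converts {type: {name: value}} into bq --parameter flags."""
    flags = []
    for param_type, name_value_pairs in parameters.items():
        for name, value in name_value_pairs.items():
            # An empty type string means STRING, matching the
            # '--parameter=builder_name::%s' flag in Example #25.
            flags.append('--parameter=%s:%s:%s' % (name, param_type, value))
    return flags


# e.g. _GenerateBigQueryParameterFlags(
#          {'': {'builder_name': 'builder'}, 'INT64': {'num_builds': 100}})
# -> ['--parameter=builder_name::builder', '--parameter=num_builds:INT64:100']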