Example No. 1
    def testResolutionReturnedFromExpectationsFor(self):
        raw_data1 = (
            '# tags: [ linux ]\n'
            '# results: [ Failure RetryOnFailure Slow ]\n'
            '[ linux ] b1/s3 [ Failure ]\n'
            'crbug.com/2431 [ linux ] b1/s2 [ Failure RetryOnFailure ]\n'
            'crbug.com/2432 [ linux ] b1/s* [ Failure ]\n')
        raw_data2 = ('# tags: [ Intel ]\n'
                     '# results: [ Pass RetryOnFailure Slow ]\n'
                     '[ intel ] b1/s1 [ RetryOnFailure ]\n'
                     'crbug.com/2432 [ intel ] b1/s2 [ Pass Slow ]\n'
                     'crbug.com/2431 [ intel ] b1/s* [ RetryOnFailure ]\n')
        raw_data3 = (
            '# tags: [ linux ]\n'
            '# results: [ Failure RetryOnFailure Slow ]\n'
            '# conflict_resolution: OVERRIDE\n'
            '[ linux ] b1/s3 [ Failure ]\n'
            'crbug.com/2431 [ linux ] b1/s2 [ Failure RetryOnFailure ]\n'
            'crbug.com/2432 [ linux ] b1/s* [ Failure ]\n')
        test_exp1 = expectations_parser.TestExpectations(['Linux'])
        ret, _ = test_exp1.parse_tagged_list(raw_data1)
        self.assertEqual(ret, 0)
        self.assertEqual(
            test_exp1.expectations_for('b1/s2'),
            Expectation(test='b1/s2',
                        results={ResultType.Failure},
                        retry_on_failure=True,
                        is_slow_test=False,
                        reason='crbug.com/2431',
                        tags={'linux'},
                        conflict_resolution=ConflictResolutionTypes.UNION))

        test_exp2 = expectations_parser.TestExpectations(['Intel'])
        ret, _ = test_exp2.parse_tagged_list(
            raw_data2, conflict_resolution=ConflictResolutionTypes.OVERRIDE)
        self.assertEqual(ret, 0)
        self.assertEqual(
            test_exp2.expectations_for('b1/s2'),
            Expectation(test='b1/s2',
                        results={ResultType.Pass},
                        retry_on_failure=False,
                        is_slow_test=True,
                        reason='crbug.com/2432',
                        tags={'intel'},
                        conflict_resolution=ConflictResolutionTypes.OVERRIDE))

        test_exp3 = expectations_parser.TestExpectations(['Linux'])
        ret, _ = test_exp3.parse_tagged_list(raw_data3)
        self.assertEqual(ret, 0)
        self.assertEqual(
            test_exp3.expectations_for('b1/s2'),
            Expectation(test='b1/s2',
                        results={ResultType.Failure},
                        retry_on_failure=True,
                        is_slow_test=False,
                        reason='crbug.com/2431',
                        tags={'linux'},
                        conflict_resolution=ConflictResolutionTypes.OVERRIDE))
Example No. 2
 def testMergeExpectationsUsingOverrideResolution(self):
     raw_data1 = (
         '# tags: [ linux ]\n'
         '# results: [ Failure RetryOnFailure Slow ]\n'
         '[ linux ] b1/s3 [ Failure ]\n'
         'crbug.com/2431 [ linux ] b1/s2 [ Failure RetryOnFailure ]\n'
         'crbug.com/2432 [ linux ] b1/s* [ Failure ]\n')
     raw_data2 = ('# tags: [ Intel ]\n'
                  '# results: [ Pass RetryOnFailure Slow ]\n'
                  '[ intel ] b1/s1 [ RetryOnFailure ]\n'
                  'crbug.com/2432 [ intel ] b1/s2 [ Pass Slow ]\n'
                  'crbug.com/2431 [ intel ] b1/s* [ RetryOnFailure ]\n')
     test_exp1 = expectations_parser.TestExpectations(['Linux'])
     ret, _ = test_exp1.parse_tagged_list(raw_data1)
     self.assertEqual(ret, 0)
     test_exp2 = expectations_parser.TestExpectations(['Intel'])
     ret, _ = test_exp2.parse_tagged_list(
         raw_data2,
         conflict_resolution=expectations_parser.ConflictResolutionTypes.
         OVERRIDE)
     self.assertEqual(ret, 0)
     test_exp1.merge_test_expectations(test_exp2)
     self.assertEqual(sorted(test_exp1.tags), ['intel', 'linux'])
     self.assertEqual(
         test_exp1.expectations_for('b1/s2'),
         Expectation(test='b1/s2',
                     results={ResultType.Pass},
                     retry_on_failure=False,
                     is_slow_test=True,
                     reason='crbug.com/2432',
                     tags={'intel'}))
     self.assertEqual(
         test_exp1.expectations_for('b1/s1'),
         Expectation(test='b1/s1',
                     results={ResultType.Pass},
                     retry_on_failure=True,
                     is_slow_test=False,
                     tags={'intel'}))
     self.assertEqual(
         test_exp1.expectations_for('b1/s3'),
         Expectation(test='b1/s3',
                     results={ResultType.Failure},
                     retry_on_failure=False,
                     is_slow_test=False,
                     tags={'linux'}))
     self.assertEqual(
         test_exp1.expectations_for('b1/s5'),
         Expectation(test='b1/s5',
                     results={ResultType.Pass},
                     retry_on_failure=True,
                     is_slow_test=False,
                     reason='crbug.com/2431',
                     tags={'intel'}))
Example No. 3
    def testWebglTestExpectationsForDriverTags(self):
        webgl_conformance_test_class = (
            webgl_conformance_integration_test.WebGLConformanceIntegrationTest)
        expectations_driver_tags = set()
        for webgl_version in range(1, 3):
            _ = list(
                webgl_conformance_test_class.GenerateGpuTests(
                    gpu_helper.GetMockArgs(webgl_version=('%d.0.0' %
                                                          webgl_version))))
            with open(webgl_conformance_test_class.ExpectationsFiles()[0],
                      'r') as f:
                parser = expectations_parser.TestExpectations()
                parser.parse_tagged_list(f.read(), f.name)
                driver_tag_set = set()
                for tag_set in parser.tag_sets:
                    if gpu_helper.MatchDriverTag(list(tag_set)[0]):
                        for tag in tag_set:
                            match = gpu_helper.MatchDriverTag(tag)
                            assert match
                            if match.group(1) == 'intel':
                                if not check_intel_driver_version(
                                        match.group(3)):
                                    assert False, INTEL_DRIVER_VERSION_SCHEMA

                        assert not driver_tag_set
                        driver_tag_set = tag_set
                    else:
                        for tag in tag_set:
                            assert not gpu_helper.MatchDriverTag(tag)
                expectations_driver_tags |= driver_tag_set

        self.assertEqual(gpu_helper.ExpectationsDriverTags(),
                         expectations_driver_tags)
Example No. 4
    def testWebglTestExpectationsForDriverTags(self):
        webgl_conformance_test_class = (
            webgl_conformance_integration_test.WebGLConformanceIntegrationTest)
        expectations_driver_tags = set()
        for webgl_version in range(1, 3):
            _ = list(
                webgl_conformance_test_class.GenerateGpuTests(
                    gpu_helper.GetMockArgs(webgl_version=('%d.0.0' %
                                                          webgl_version))))
            with open(webgl_conformance_test_class.ExpectationsFiles()[0],
                      'r') as f:
                parser = expectations_parser.TestExpectations()
                parser.parse_tagged_list(f.read(), f.name)
                driver_tag_set = set()
                for tag_set in parser.tag_sets:
                    if gpu_helper.MatchDriverTag(list(tag_set)[0]):
                        for tag in tag_set:
                            match = gpu_helper.MatchDriverTag(tag)
                            self.assertIsNotNone(match)
                            if match.group(1) == 'intel':
                                self.assertTrue(
                                    check_intel_driver_version(match.group(3)))

                        self.assertSetEqual(driver_tag_set, set())
                        driver_tag_set = tag_set
                    else:
                        for tag in tag_set:
                            self.assertIsNone(gpu_helper.MatchDriverTag(tag))
                expectations_driver_tags |= driver_tag_set

        self.assertEqual(gpu_helper.ExpectationsDriverTags(),
                         expectations_driver_tags)
Example No. 5
 def testExpectationWithGlobIsNotBroken(self):
     test_expectations = '# results: [ Failure ]\na/b* [ Failure ]'
     expectations = expectations_parser.TestExpectations()
     expectations.parse_tagged_list(test_expectations, 'test.txt')
     broken_expectations = expectations.check_for_broken_expectations(
         ['a/b/c'])
     self.assertFalse(broken_expectations)
Example No. 6
 def testForBrokenWebglExtensionExpectations(self):
     webgl_test_class = (
         webgl_conformance_integration_test.WebGLConformanceIntegrationTest)
     for webgl_version in range(1, 3):
         tests = [
             test[0] for test in webgl_test_class.GenerateGpuTests(
                 gpu_helper.GetMockArgs(webgl_version='%d.0.0' %
                                        webgl_version))
         ]
         with open(webgl_test_class.ExpectationsFiles()[0], 'r') as f:
             expectations = expectations_parser.TestExpectations()
             expectations.parse_tagged_list(f.read())
             patterns_to_exps = expectations.individual_exps.copy()
             patterns_to_exps.update(expectations.glob_exps)
             patterns_to_exps = {
                 k: v
                 for k, v in patterns_to_exps.items()
                 if k.lower().startswith('webglextension')
             }
             broken_expectations = expectations.get_broken_expectations(
                 patterns_to_exps, tests)
             msg = ''
             for unused_pattern in set(
                 [e.test for e in broken_expectations]):
                 msg += (
                     "Expectations with pattern '{0}' in {1} do not apply to any "
                     "webgl version {2} extension tests\n".format(
                         unused_pattern, os.path.basename(f.name),
                         webgl_version))
             assert not msg, msg
Example No. 7
def CheckTestExpectationPatternsForConflicts(expectations, file_name):
    test_expectations = expectations_parser.TestExpectations()
    test_expectations.parse_tagged_list(expectations,
                                        file_name=file_name,
                                        tags_conflict=_DoTagsConflict)
    _MapGpuDevicesToVendors(test_expectations.tag_sets)
    return test_expectations.check_test_expectations_patterns_for_conflicts()
Example No. 8
    def testWebglTestExpectationsForDriverTags(self):
        webgl_conformance_test_class = (
            webgl_conformance_integration_test.WebGLConformanceIntegrationTest)
        expectations_driver_tags = set()
        for i in range(1, 3):
            _ = list(
                webgl_conformance_test_class.GenerateGpuTests(
                    gpu_helper.GetMockArgs(webgl_version=('%d.0.0' % i))))
            with open(webgl_conformance_test_class.ExpectationsFiles()[0],
                      'r') as f:
                parser = expectations_parser.TestExpectations()
                parser.parse_tagged_list(f.read(), f.name)
                driver_tag_set = set()
                for tag_set in parser.tag_sets:
                    if gpu_helper.MatchDriverTag(list(tag_set)[0]):
                        for tag in tag_set:
                            assert gpu_helper.MatchDriverTag(tag)
                        assert not driver_tag_set
                        driver_tag_set = tag_set
                    else:
                        for tag in tag_set:
                            assert not gpu_helper.MatchDriverTag(tag)
                expectations_driver_tags |= driver_tag_set

        self.assertEqual(gpu_helper.ExpectationsDriverTags(),
                         expectations_driver_tags)
Example No. 9
 def testExpectationWithGlobIsBroken(self):
     test_expectations = '# results: [ Failure ]\na/b/d* [ Failure ]'
     expectations = expectations_parser.TestExpectations()
     expectations.parse_tagged_list(test_expectations, 'test.txt')
     broken_expectations = expectations.check_for_broken_expectations(
         ['a/b/c/d', 'a/b', 'a/b/c'])
     self.assertEqual(broken_expectations[0].test, 'a/b/d*')
Example No. 10
 def testIsTestRetryOnFailure(self):
     raw_data = (
         '# tags: [ linux ]\n'
         '# results: [ Failure RetryOnFailure ]\n'
         '# conflicts_allowed: true\n'
         'crbug.com/23456 [ Linux ] b1/s1 [ Failure ]\n'
         'crbug.com/23456 [ Linux ] b1/s1 [ RetryOnFailure ]\n'
         '[ linux ] b1/s2 [ RetryOnFailure ]\n'
         'crbug.com/24341 [ Linux ] b1/s3 [ Failure ]\n')
     test_expectations = expectations_parser.TestExpectations(['Linux'])
     self.assertEqual(
         test_expectations.parse_tagged_list(raw_data, 'test.txt'), (0, ''))
     self.assertEqual(
         test_expectations.expectations_for('b1/s1'),
         Expectation(test='b1/s1',
                     results={ResultType.Failure, ResultType.Pass},
                     retry_on_failure=True,
                     is_slow_test=False,
                     reason='crbug.com/23456'))
     self.assertEqual(
         test_expectations.expectations_for('b1/s2'),
         Expectation(test='b1/s2',
                     results={ResultType.Pass},
                     retry_on_failure=True,
                     is_slow_test=False))
     self.assertEqual(
         test_expectations.expectations_for('b1/s3'),
         Expectation(test='b1/s3',
                     results={ResultType.Failure},
                     retry_on_failure=False,
                     is_slow_test=False,
                     reason='crbug.com/24341'))
     self.assertEqual(
         test_expectations.expectations_for('b1/s4'),
         Expectation(test='b1/s4',
                     results={ResultType.Pass},
                     retry_on_failure=False,
                     is_slow_test=False))
Example No. 11
    def testForBrokenWebglExtensionExpectations(self):
        webgl_test_class = (
            webgl_conformance_integration_test.WebGLConformanceIntegrationTest)
        for webgl_version in range(1, 3):
            tests = [
                test[0] for test in webgl_test_class.GenerateGpuTests(
                    gpu_helper.GetMockArgs(webgl_version='%d.0.0' %
                                           webgl_version))
            ]
            with open(webgl_test_class.ExpectationsFiles()[0], 'r') as f:
                expectations = expectations_parser.TestExpectations()
                expectations.parse_tagged_list(f.read())

                # Remove non-WebGL-extension expectations. Iterate over a copy
                # of the keys since the dicts are mutated inside the loops.
                for test in list(expectations.individual_exps.keys()):
                    if not test.lower().startswith('webglextension'):
                        expectations.individual_exps.pop(test)
                for test in list(expectations.glob_exps.keys()):
                    if not test.lower().startswith('webglextension'):
                        expectations.glob_exps.pop(test)

                broken_expectations = expectations.check_for_broken_expectations(
                    tests)
                msg = ''
                for unused_pattern in set(
                    [e.test for e in broken_expectations]):
                    msg += (
                        "Expectations with pattern '{0}' in {1} do not apply to any "
                        "webgl version {2} extension tests\n".format(
                            unused_pattern, os.path.basename(f.name),
                            webgl_version))
                self.assertEqual(msg, '')
Example No. 12
 def testMergeExpectationsUsingUnionResolution(self):
     raw_data1 = (
         '# tags: [ linux ]\n'
         '# results: [ Failure RetryOnFailure Slow ]\n'
         '[ linux ] b1/s3 [ Failure ]\n'
         'crbug.com/2431 [ linux ] b1/s2 [ Failure RetryOnFailure ] # c1\n'
         'crbug.com/2432 [ linux ] b1/s* [ Failure Slow ]\n')
     raw_data2 = ('# tags: [ Intel ]\n'
                  '# results: [ Pass RetryOnFailure ]\n'
                  '[ intel ] b1/s1 [ RetryOnFailure ]\n'
                  'crbug.com/2432 [ intel ] b1/s2 [ Pass ] # c2\n'
                  'crbug.com/2431 [ intel ] b1/s* [ RetryOnFailure ]\n')
     test_exp1 = expectations_parser.TestExpectations(['Linux'])
     ret, _ = test_exp1.parse_tagged_list(raw_data1)
     self.assertEqual(ret, 0)
     test_exp2 = expectations_parser.TestExpectations(['Intel'])
     ret, _ = test_exp2.parse_tagged_list(raw_data2)
     self.assertEqual(ret, 0)
     test_exp1.merge_test_expectations(test_exp2)
     self.assertEqual(sorted(test_exp1.tags), ['intel', 'linux'])
     self.assertEqual(
         test_exp1.expectations_for('b1/s2'),
         Expectation(test='b1/s2',
                     results={ResultType.Pass, ResultType.Failure},
                     retry_on_failure=True,
                     is_slow_test=False,
                     reason='crbug.com/2431 crbug.com/2432',
                     trailing_comments=' # c1\n # c2\n'))
     self.assertEqual(
         test_exp1.expectations_for('b1/s1'),
         Expectation(test='b1/s1',
                     results={ResultType.Pass},
                     retry_on_failure=True,
                     is_slow_test=False))
     self.assertEqual(
         test_exp1.expectations_for('b1/s3'),
         Expectation(test='b1/s3',
                     results={ResultType.Failure},
                     retry_on_failure=False,
                     is_slow_test=False))
     self.assertEqual(
         test_exp1.expectations_for('b1/s5'),
         Expectation(test='b1/s5',
                     results={ResultType.Failure},
                     retry_on_failure=True,
                     is_slow_test=True,
                     reason='crbug.com/2431 crbug.com/2432'))
Example No. 13
 def testGetExpectationsFromGlob(self):
     raw_data = ('# tags: [ Linux ]\n'
                 '# results: [ Failure ]\n'
                 'crbug.com/23456 [ linux ] b1/s1* [ Failure ]\n')
     expectations = expectations_parser.TestExpectations(tags=['linux'])
     expectations.parse_tagged_list(raw_data)
     exp = expectations.expectations_for('b1/s1')
     self.assertEqual(exp.results, set([ResultType.Failure]))
Example No. 14
 def testIsTestRetryOnFailureUsingGlob(self):
     raw_data = ('# tags: [ Linux ]\n'
                 'crbug.com/23456 [ Linux ] b1/* [ RetryOnFailure ]\n')
     test_expectations = expectations_parser.TestExpectations(['Linux'])
     self.assertEqual(test_expectations.parse_tagged_list(raw_data),
                      (0, None))
     self.assertEqual(test_expectations.expectations_for('b1/s1'),
                      (set([ResultType.Pass]), True))
Example No. 15
 def testConflictNotFoundRegardlessOfTagCase(self):
     test_expectations = '''# tags: [ InTel AMD nvidia ]
     # results: [ Failure ]
     [ intel ] a/b/c/d [ Failure ]
     [ amd ] a/b/c/d [ Failure ]
     '''
     expectations = expectations_parser.TestExpectations()
     _, msg = expectations.parse_tagged_list(test_expectations, 'test.txt')
     self.assertFalse(msg)
Example No. 16
 def testIgnoredTags(self):
     test_expectations = """# tags: [ foo ]
     # results: [ Failure ]
     """
     expectations = expectations_parser.TestExpectations(
         ignored_tags=['ignored'])
     _, msg = expectations.parse_tagged_list(test_expectations, 'test.txt')
     self.assertFalse(msg)
     expectations.set_tags(['ignored'], raise_ex_for_bad_tags=True)
Example No. 17
 def testGetExpectationsFromGlobShorterThanLongestMatchingGlob(self):
     raw_data = ('# tags: [ Linux Mac ]\n'
                 '# results: [ Failure Pass ]\n'
                 'crbug.com/23456 [ linux ] b1/s1* [ Failure ]\n'
                 'crbug.com/23456 [ mac ] b1/* [ Pass ]\n')
     expectations = expectations_parser.TestExpectations(tags=['mac'])
     expectations.parse_tagged_list(raw_data)
     exp = expectations.expectations_for('b1/s1')
     self.assertEqual(exp.results, set([ResultType.Pass]))
Example No. 18
 def testFileNameExcludedFromErrorMessageForExpectationConflicts(self):
     test_expectations = '''# tags: [ mac ]
     # tags: [ intel ]
     # results: [ Failure ]
     [ intel ] a/b/c/d [ Failure ]
     [ mac ] a/b/c/d [ Failure ]
     '''
     expectations = expectations_parser.TestExpectations()
     _, errors = expectations.parse_tagged_list(test_expectations)
     self.assertIn("Found conflicts for test a/b/c/d:", errors)
Example No. 19
 def testDeclaredSystemConditionTagsDontRaiseAnException(self):
     test_expectations = '''# tags: [ InTel AMD nvidia nvidia-0x1010 ]
     # tags: [ win ]
     # results: [ Failure ]
     '''
     expectations = expectations_parser.TestExpectations()
     _, msg = expectations.parse_tagged_list(test_expectations, 'test.txt')
     self.assertFalse(msg)
     expectations.set_tags(['win', 'nVidia', 'nvidia-0x1010'],
                           raise_ex_for_bad_tags=True)
Example No. 20
 def testMultipleReasonsForExpectation(self):
     test_expectations = '''# results: [ Failure ]
     skbug.com/111 crbug.com/lpz/222 skbug.com/hello/333 crbug.com/444 test [ Failure ]
     '''
     expectations = expectations_parser.TestExpectations()
     _, msg = expectations.parse_tagged_list(
         test_expectations, 'test.txt')
     self.assertFalse(msg)
     exp = expectations.expectations_for('test')
     self.assertEqual(
         exp.reason,
         'skbug.com/111 crbug.com/lpz/222 skbug.com/hello/333 crbug.com/444')
Example No. 21
    def testRetryOnFailureDefaultPassAndFailure(self):
        raw_data = ('# tags: [ Linux ]\n'
                    '# results: [ Failure RetryOnFailure ]\n'
                    'crbug.com/23456 [ Linux ] b1/s1 [ Failure ]\n'
                    'crbug.com/23456 b1/s1 [ RetryOnFailure ]\n')

        expectations = expectations_parser.TestExpectations(tags=['linux'])
        expectations.parse_tagged_list(raw_data)
        exp = expectations.expectations_for('b1/s1')
        self.assertEqual(exp.results, set([ResultType.Failure]))
        self.assertFalse(exp.is_default_pass)
        self.assertTrue(exp.should_retry_on_failure)

        expectations = expectations_parser.TestExpectations(tags=['win'])
        expectations.parse_tagged_list(raw_data)
        exp = expectations.expectations_for('b1/s1')
        self.assertEqual(exp.results, set([ResultType.Pass]))
        self.assertTrue(exp.is_default_pass)
        self.assertTrue(exp.should_retry_on_failure)
Example No. 22
 def testConflictFoundRegardlessOfTagCase(self):
     test_expectations = '''# tags: [ InTel AMD nvidia ]
     # results: [ Failure ]
     [ intel ] a/b/c/d [ Failure ]
     [ Intel ] a/b/c/d [ Failure ]
     '''
     expectations = expectations_parser.TestExpectations()
     ret, msg = expectations.parse_tagged_list(
         test_expectations, 'test.txt')
     self.assertTrue(ret)
     self.assertIn('Found conflicts for pattern a/b/c/d', msg)
Example No. 23
 def testUseIncorrectValueForConflictsAllowedDescriptor(self):
     test_expectations = '''# tags: [ mac win linux ]
     # tags: [ intel amd nvidia ]
     # tags: [ debug release ]
     # results: [ Failure Skip ]
     # conflicts_allowed: Unknown
     '''
     expectations = expectations_parser.TestExpectations()
     _, msg = expectations.parse_tagged_list(test_expectations, 'test.txt')
     self.assertEqual("5: Unrecognized value 'unknown' "
                      "given for conflicts_allowed descriptor", msg)
Example No. 24
 def testValidateStoryInvalidName(self):
     raw_expectations = ('# tags: [ Mac ]\n'
                         '# results: [ Skip ]\n'
                         'crbug.com/123 [ Mac ] b1/s1 [ Skip ]\n')
     test_expectations = typ_expectations_parser.TestExpectations()
     ret, _ = test_expectations.parse_tagged_list(raw_expectations)
     self.assertFalse(ret)
     benchmarks = [FakeBenchmark]
     with self.assertRaises(AssertionError):
         story_expectation_validator.validate_story_names(
             benchmarks, test_expectations)
Example No. 25
 def testConflictsAllowedIsSetToTrue(self):
     test_expectations = '''# tags: [ mac win linux ]
     # tags: [ intel amd nvidia ]
     # tags: [ debug release ]
     # results: [ Failure ]
     # conflicts_allowed: True
     [ intel debug ] a/b/c/d [ Failure ]
     [ intel ] a/b/c/d [ Failure ]
     '''
     expectations = expectations_parser.TestExpectations()
     _, msg = expectations.parse_tagged_list(test_expectations, 'test.txt')
     self.assertFalse(msg)
Example No. 26
def main():
    benchmarks = benchmark_finders.GetAllBenchmarks()
    with open(path_util.GetExpectationsPath()) as fp:
        raw_expectations_data = fp.read()
    test_expectations = typ_expectations_parser.TestExpectations()
    ret, msg = test_expectations.parse_tagged_list(raw_expectations_data)
    if ret:
        logging.error(msg)
        return ret
    validate_story_names(benchmarks, test_expectations)
    validate_expectations_component_tags(test_expectations)
    return 0
Example No. 27
 def testValidateExpectationsComponentTags(self):
     raw_expectations = ('# tags: [ android mac ]\n'
                         '# tags: [ android-webview ]\n'
                         '# results: [ Skip ]\n'
                         'crbug.com/123 [ mac android-webview ]'
                         ' b1/s1 [ Skip ]\n')
     test_expectations = typ_expectations_parser.TestExpectations()
     ret, _ = test_expectations.parse_tagged_list(raw_expectations)
     self.assertFalse(ret)
     with self.assertRaises(AssertionError):
         story_expectation_validator.validate_expectations_component_tags(
             test_expectations)
Example No. 28
 def testValidateStoryValidName(self):
     raw_expectations = ('# tags: [ Mac ]\n'
                         '# results: [ Skip ]\n'
                         'crbug.com/123 [ Mac ] b1/One [ Skip ]\n')
     test_expectations = typ_expectations_parser.TestExpectations()
     ret, _ = test_expectations.parse_tagged_list(raw_expectations)
     self.assertFalse(ret)
     benchmarks = [FakeBenchmark]
     # If a name is invalid, an exception is thrown; if no exception is
     # thrown, all story names are valid. That is why there is no assert here.
     story_expectation_validator.validate_story_names(
         benchmarks, test_expectations)
Example No. 29
 def testWebglTestPathsExist(self):
   webgl_test_class = (
       webgl_conformance_integration_test.WebGLConformanceIntegrationTest)
   for webgl_version in range(1, 3):
     _ = list(
         webgl_test_class.GenerateGpuTests(
             gpu_helper.GetMockArgs(webgl_version='%d.0.0' % webgl_version)))
     with open(webgl_test_class.ExpectationsFiles()[0], 'r') as f:
       expectations = expectations_parser.TestExpectations()
       expectations.parse_tagged_list(f.read())
       for pattern, _ in expectations.individual_exps.items():
         _CheckWebglConformanceTestPathIsValid(pattern)
Example No. 30
 def testNoCollisionInTestExpectations(self):
     test_expectations = '''# tags: [ mac win linux ]
     # tags: [ intel amd nvidia ]
     # tags: [ debug release ]
     # results: [ Failure ]
     # conflicts_allowed: False
     [ intel debug ] a/b/c/d [ Failure ]
     [ nvidia debug ] a/b/c/d [ Failure ]
     '''
     expectations = expectations_parser.TestExpectations()
     _, errors = expectations.parse_tagged_list(test_expectations,
                                                'test.txt')
     self.assertFalse(errors)