Example #1
  def __init__(self, max_failures=None):
    """Creates a new Benchmark.

    Args:
      max_failures: The number of story run failures before bailing
          out of executing subsequent page runs. If None, we never bail.
    """
    self._expectations = typ_expectations.StoryExpectations(self.Name())
    self._max_failures = max_failures
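
The max_failures argument described in the docstring caps how many failed story runs are tolerated before the remaining page runs are skipped. A minimal sketch of wiring it through a subclass, assuming this __init__ belongs to telemetry's Benchmark base class; FakeBenchmark, its Name() override, and the import path are illustrative assumptions:

from telemetry import benchmark  # import path assumed for illustration


class FakeBenchmark(benchmark.Benchmark):
  """Hypothetical subclass used only to illustrate max_failures."""

  @classmethod
  def Name(cls):
    return 'fake_benchmark_name'


# Give up on the remaining story runs after two failures; the default of
# None means we never bail.
fake_benchmark = FakeBenchmark(max_failures=2)
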
Example #2
 def testDisableBenchmark(self):
     expectations = typ_expectations.StoryExpectations(
         'fake_benchmark_name')
     raw_expectations = (
         '# tags: [ all ]\n'
         '# results: [ Skip ]\n'
         'crbug.com/123 [ all ] fake_benchmark_name/* [ Skip ]\n')
     expectations.GetBenchmarkExpectationsFromParser(raw_expectations)
     expectations.SetTags(['All'])
     reason = expectations.IsBenchmarkDisabled()
     self.assertTrue(reason)
     self.assertEqual(reason, 'crbug.com/123')
Example #3
 def BuildStoryFilter(cls, benchmark_name, platform_tags,
                      abridged_story_set_tag):
   expectations = typ_expectations.StoryExpectations(benchmark_name)
   expectations.SetTags(platform_tags or [])
   if cls._expectations_file and os.path.exists(cls._expectations_file):
     with open(cls._expectations_file) as fh:
       expectations.GetBenchmarkExpectationsFromParser(fh.read())
   if not cls._run_abridged_story_set:
     abridged_story_set_tag = None
   return StoryFilter(
       expectations, abridged_story_set_tag, cls._story_filter,
       cls._story_filter_exclude,
       cls._story_tag_filter, cls._story_tag_filter_exclude,
       cls._shard_begin_index, cls._shard_end_index, cls._run_disabled_stories,
       stories=cls._stories)
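
BuildStoryFilter loads the typ expectations file from disk when one is configured and applies the platform tags before handing the parsed expectations to StoryFilter. A standalone sketch of just that loading step, mirroring the calls above; the file path, tag list, and import path are illustrative assumptions:

import os

from telemetry.story import typ_expectations  # import path assumed

EXPECTATIONS_FILE = 'expectations.config'  # illustrative path

expectations = typ_expectations.StoryExpectations('fake_benchmark_name')
expectations.SetTags(['linux'])  # platform tags for this run
if os.path.exists(EXPECTATIONS_FILE):
  with open(EXPECTATIONS_FILE) as fh:
    expectations.GetBenchmarkExpectationsFromParser(fh.read())
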
Example #4
 def testDisableStory_NoReasonGiven(self):
     expectations = typ_expectations.StoryExpectations(
         'fake_benchmark_name')
     raw_expectations = (
         '# tags: [ linux win ]\n'
         '# results: [ Skip ]\n'
         '[ linux ] fake_benchmark_name/one [ Skip ]\n'
         'crbug.com/123 [ win ] fake_benchmark_name/on* [ Skip ]\n')
     expectations.GetBenchmarkExpectationsFromParser(raw_expectations)
     expectations.SetTags(['linux'])
     story = mock.MagicMock()
     story.name = 'one'
     reason = expectations.IsStoryDisabled(story)
     self.assertTrue(reason)
     self.assertEqual(reason, 'No reason given')
Example #5
 def AugmentExpectationsWithFile(self, raw_data):
   typ_parser = typ_expectations_parser.TestExpectations()
   error, _ = typ_parser.parse_tagged_list(raw_data)
   if not error:
     self._expectations = typ_expectations.StoryExpectations(self.Name())
     self._expectations.GetBenchmarkExpectationsFromParser(raw_data)
   else:
     # If we can't parse the file using typ's expectations parser,
     # then we fall back to using py_utils' expectations parser.
     # TODO(crbug.com/973936): When all expectations files have
     # been migrated, we will remove this if/else statement and
     # only use typ's expectations parser.
     self._expectations.GetBenchmarkExpectationsFromParser(
         expectations_parser.TestExpectationParser(
             raw_data).expectations, self.Name())
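
AugmentExpectationsWithFile probes the raw data with typ's parser first and only falls back to the legacy py_utils parser when typ reports an error. A minimal sketch of that probe on its own; the expectations text is illustrative and the import is an assumption matching the typ_expectations_parser alias above:

from typ import expectations_parser as typ_expectations_parser  # import assumed

raw_data = (
    '# tags: [ all ]\n'
    '# results: [ Skip ]\n'
    'crbug.com/123 [ all ] fake_benchmark_name/* [ Skip ]\n')

typ_parser = typ_expectations_parser.TestExpectations()
error, _ = typ_parser.parse_tagged_list(raw_data)
if not error:
  print('typ parser accepted the expectations data')
else:
  print('typ parser rejected the data; fall back to the legacy parser')
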
Example #6
 def testValidateTags(self):
   story_expectations = typ_expectations.StoryExpectations('fake')
   story_expectations.GetBenchmarkExpectationsFromParser('# tags: [ all ]')
   with self.assertRaises(ValueError) as context:
     story_expectations.SetTags(['abc'], True)
   self.assertIn('not declared in the expectations', str(context.exception))
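
Per this test, passing True as the second argument to SetTags asks it to validate the tags against those declared in the expectations data, and an undeclared tag raises ValueError. A small sketch of both outcomes, inferred from the test above; the import path is an assumption:

from telemetry.story import typ_expectations  # import path assumed

story_expectations = typ_expectations.StoryExpectations('fake')
story_expectations.GetBenchmarkExpectationsFromParser('# tags: [ all ]')

story_expectations.SetTags(['all'], True)   # declared in '# tags: [ all ]', accepted
try:
  story_expectations.SetTags(['abc'], True)  # not declared, raises ValueError
except ValueError as error:
  print(error)
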