Example #1
0
    def GetExpectations(self):
        """Returns a StoryExpectations object.

        The returned object determines which stories are disabled.
        Subclasses are expected to override this; the default is an
        empty expectations object (nothing disabled).
        """
        default_expectations = expectations.StoryExpectations()
        return default_expectations
Example #2
0
def GenerateSystemHealthCSV(file_path):
    """Writes a CSV summary of all system health stories to |file_path|.

    Each row lists the story name, supported platform ('all', 'desktop'
    or 'mobile'), description, and the platforms the story is disabled
    on (a single space when it is not disabled anywhere).

    Args:
      file_path: Path of the CSV file to write.

    Returns:
      0 on success.
    """
    system_health_stories = list(IterAllSystemHealthStories())

    e = expectations.StoryExpectations()
    with open(path_util.GetExpectationsPath()) as fp:
        parser = expectations_parser.TestExpectationParser(fp.read())

    benchmarks = [
        'system_health.common_desktop', 'system_health.common_mobile',
        'system_health.memory_desktop', 'system_health.memory_mobile'
    ]
    for benchmark in benchmarks:
        e.GetBenchmarkExpectationsFromParser(parser.expectations, benchmark)

    # Maps story name -> description of the platforms it is disabled on.
    # (Fixed typo: was 'disabed_platforms'.)
    disabled_platforms = PopulateExpectations([e.AsDict()['stories']])

    system_health_stories.sort(key=lambda s: s.name)
    with open(file_path, 'w') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(
            ['Story name', 'Platform', 'Description', 'Disabled Platforms'])
        for s in system_health_stories:
            p = s.SUPPORTED_PLATFORMS
            # Two supported platform conditions means both desktop and
            # mobile are supported.
            if len(p) == 2:
                p = 'all'
            else:
                p = list(p)[0]
            # dict.get collapses the previous duplicated writerow
            # branches; " " is the original placeholder for stories
            # that are not disabled anywhere.
            csv_writer.writerow([
                s.name, p,
                s.GetStoryDescription(),
                disabled_platforms.get(s.name, " ")
            ])
    return 0
 def testGetBenchmarkExpectationsFromParserMultipleDisablesSameBenchmark(
         self):
     """Multiple disables for one benchmark accumulate; others are ignored."""
     raw_data = [
         expectations_parser.Expectation(
             'crbug.com/123', 'benchmark1/story', ['Win'], ['Skip']),
         expectations_parser.Expectation(
             'crbug.com/234', 'benchmark2/story2', ['Win'], ['Skip']),
         expectations_parser.Expectation(
             'crbug.com/345', 'benchmark1/story', ['Mac'], ['Skip']),
     ]
     story_expectations = expectations.StoryExpectations()
     story_expectations.GetBenchmarkExpectationsFromParser(
         raw_data, 'benchmark1')
     actual = self._ConvertTestConditionsToStrings(
         story_expectations.AsDict())
     self.assertEqual(
         actual, {
             'platforms': (),
             'stories': {
                 'story': [
                     (['Win'], 'crbug.com/123'),
                     (['Mac'], 'crbug.com/345'),
                 ],
             }
         })
 def testGetBenchmarkExpectationsFromParserUnmappedTag(self):
   """A tag with no known platform mapping raises KeyError."""
   story_expectations = expectations.StoryExpectations()
   raw_data = [
       expectations_parser.Expectation(
           'crbug.com/23456', 'benchmark1/story', ['Unmapped_Tag'], ['Skip']),
   ]
   with self.assertRaises(KeyError):
     story_expectations.GetBenchmarkExpectationsFromParser(
         raw_data, 'benchmark1')
Example #5
0
    def __init__(self, max_failures=None):
        """Creates a new Benchmark.

        Args:
          max_failures: Number of story-run failures to tolerate before
              bailing out of subsequent page runs. If None, we never bail.
        """
        # The two assignments are independent; start with the simple one.
        self._max_failures = max_failures
        self._expectations = expectations_module.StoryExpectations()
 def testGetBenchmarkExpectationsFromParserRefreeze(self):
   """After parsing, the expectations object is frozen against edits."""
   story_expectations = expectations.StoryExpectations()
   story_expectations.GetBenchmarkExpectationsFromParser(
       [
           expectations_parser.Expectation(
               'crbug.com/23456', 'benchmark1/story', ['All'], ['Skip']),
       ], 'benchmark1')
   with self.assertRaises(AssertionError):
     story_expectations.DisableStory('story', [], 'reason')
 def testGetBenchmarkExpectationsFromParserNoBenchmarkMatch(self):
   """Expectations for other benchmarks are ignored entirely."""
   story_expectations = expectations.StoryExpectations()
   raw_data = [
       expectations_parser.Expectation(
           'crbug.com/12345', 'benchmark2/story', ['All'], ['Skip']),
   ]
   story_expectations.GetBenchmarkExpectationsFromParser(
       raw_data, 'benchmark1')
   self.assertEqual(story_expectations.AsDict(),
                    {'platforms': [], 'stories': {}})
Example #8
0
 def testGetBenchmarkExpectationsFromParserDisableBenchmark(self):
     """A '*' story pattern disables the whole benchmark."""
     raw_data = [
         expectations_parser.Expectation(
             'crbug.com/123', 'benchmark1/*', ['Desktop', 'Linux'],
             ['Skip']),
     ]
     story_expectations = expectations.StoryExpectations()
     story_expectations.GetBenchmarkExpectationsFromParser(
         raw_data, 'benchmark1')
     actual = self._ConvertTestConditionsToStrings(
         story_expectations.AsDict())
     self.assertEqual(
         actual, {
             'platforms': [('Desktop+Linux', 'crbug.com/123')],
             'stories': {},
         })
 def testGetBenchmarkExpectationsFromParserMultipleConditions(self):
   """Multiple tags on one expectation combine into a single condition."""
   story_expectations = expectations.StoryExpectations()
   story_expectations.GetBenchmarkExpectationsFromParser(
       [
           expectations_parser.Expectation(
               'crbug.com/23456', 'benchmark1/story', ['Win', 'Mac'],
               ['Skip']),
       ], 'benchmark1')
   actual = self._ConvertTestConditionsToStrings(
       story_expectations.AsDict())
   self.assertEqual(
       actual, {
           'platforms': [],
           'stories': {
               'story': [(['Win+Mac'], 'crbug.com/23456')],
           }
       })
 def testCantDisableAfterInit(self):
     """Disable calls after initialization raise AssertionError."""
     story_expectations = expectations.StoryExpectations()
     with self.assertRaises(AssertionError):
         story_expectations.PermanentlyDisableBenchmark(['test'], 'test')
     with self.assertRaises(AssertionError):
         story_expectations.DisableStory('story', ['platform'], 'reason')