def validate_story_names(benchmarks, raw_expectations_data):
    """Assert that expectations only reference real story names.

    For every benchmark (except Cluster Telemetry ones, which are exempt),
    instantiate it, feed it the raw expectations data, and verify that no
    expectation refers to a story missing from the benchmark's story set.

    Raises:
      AssertionError: listing the offending story names, if any.
    """
    for benchmark_class in benchmarks:
        # Cluster Telemetry benchmarks are validated elsewhere; skip them.
        if benchmark_class.Name() in CLUSTER_TELEMETRY_BENCHMARKS:
            continue
        instance = benchmark_class()
        instance.AugmentExpectationsWithParser(raw_expectations_data)
        broken = instance.GetBrokenExpectations(
            benchmark_utils.GetBenchmarkStorySet(instance))
        assert not broken, 'Incorrect story names: %s' % str(broken)
def stories(self):
    """Return the list of story names for this benchmark, lazily computed.

    The first call builds the benchmark's story set, applies the abridged
    tag filter when ``self.abridged`` is set, filters stories through a
    ``story_filter.StoryFilter``, and caches the resulting names on
    ``self._stories``. Subsequent calls return the cached list.

    Returns:
      A list of story name strings.
    """
    # Fixed: the original compared with `!= None`; PEP 8 requires an
    # identity check for None. Guard clause replaces the redundant else.
    if self._stories is not None:
        return self._stories
    story_set = benchmark_utils.GetBenchmarkStorySet(self.benchmark())
    # Only restrict to the abridged subset when explicitly requested.
    abridged_story_set_tag = (
        story_set.GetAbridgedStorySetTagFilter() if self.abridged else None)
    story_filter_obj = story_filter.StoryFilter(
        abridged_story_set_tag=abridged_story_set_tag)
    self._stories = [
        story.name for story in story_filter_obj.FilterStories(story_set)]
    return self._stories
def validate_story_names(benchmarks, test_expectations):
    """Assert that every expectation pattern applies to at least one story.

    Collects the fully-qualified story names ('<benchmark>/<story>') of all
    benchmarks (except Cluster Telemetry ones, which are exempt) and asks
    the expectations object which expectations match none of them.

    Args:
      benchmarks: iterable of benchmark classes exposing Name().
      test_expectations: object exposing check_for_broken_expectations(),
          returning expectation entries whose patterns match no story.

    Raises:
      AssertionError: naming each unused expectation pattern, if any.
    """
    stories = []
    for benchmark in benchmarks:
        # Cluster Telemetry benchmarks are validated elsewhere; skip them.
        if benchmark.Name() in CLUSTER_TELEMETRY_BENCHMARKS:
            continue
        story_set = benchmark_utils.GetBenchmarkStorySet(benchmark())
        stories.extend(
            benchmark.Name() + '/' + s.name for s in story_set.stories)
    broken_expectations = test_expectations.check_for_broken_expectations(
        stories)
    # str.join instead of quadratic `+=` concatenation; a set comprehension
    # (rather than set([...])) deduplicates patterns shared by entries.
    unused_patterns = ''.join(
        "Expectations with pattern '%s'"
        " do not apply to any stories\n" % pattern
        for pattern in {e.test for e in broken_expectations})
    assert not unused_patterns, unused_patterns