def testMissingArchiveDataFileWithSkippedStory(self):
  story_set = test_stories.DummyStorySet(['story'])
  self._mock_story_filter.ShouldSkip.return_value = True
  success = story_runner._UpdateAndCheckArchives(
      story_set.archive_data_file, story_set.wpr_archive_info,
      story_set.stories, self._mock_story_filter)
  self.assertTrue(success)
def testArchiveDataFileDoesNotExist(self):
  story_set = test_stories.DummyStorySet(
      ['story'], archive_data_file='does_not_exist.json')
  with self.assertRaises(story_runner.ArchiveError):
    story_runner._UpdateAndCheckArchives(
        story_set.archive_data_file, story_set.wpr_archive_info,
        story_set.stories, self._mock_story_filter)
def testMissingArchiveDataFile(self):
  story_set = test_stories.DummyStorySet(['story'])
  with self.assertRaises(story_runner.ArchiveError):
    story_runner._UpdateAndCheckArchives(
        story_set.archive_data_file, story_set.wpr_archive_info,
        story_set.stories, self._mock_story_filter)
def testUpdateAndCheckArchivesSuccess(self):
  # This test file has a recording for a 'http://www.testurl.com' story only.
  archive_data_file = os.path.join(
      util.GetUnittestDataDir(), 'archive_files', 'test.json')
  story_set = test_stories.DummyStorySet(
      ['http://www.testurl.com'], archive_data_file=archive_data_file)
  success = story_runner._UpdateAndCheckArchives(
      story_set.archive_data_file, story_set.wpr_archive_info,
      story_set.stories, self._mock_story_filter)
  self.assertTrue(success)
def testArchiveWithMissingStory(self):
  # This test file has a recording for a 'http://www.testurl.com' story only.
  archive_data_file = os.path.join(
      util.GetUnittestDataDir(), 'archive_files', 'test.json')
  story_set = test_stories.DummyStorySet(
      ['http://www.testurl.com', 'http://www.google.com'],
      archive_data_file=archive_data_file)
  with self.assertRaises(story_runner.ArchiveError):
    story_runner._UpdateAndCheckArchives(
        story_set.archive_data_file, story_set.wpr_archive_info,
        story_set.stories, self._mock_story_filter)
def __init__(self, stories=None, **kwargs):
  """A customizable fake benchmark.

  Args:
    stories: Optional sequence of either story names or story objects.
      Instances of DummyStory are useful here. If omitted, the benchmark
      will contain a single DummyStory.
    **kwargs: Additional arguments passed to the test_stories.DummyStorySet
      constructor.
  """
  super(FakeBenchmark, self).__init__()
  self._story_set = test_stories.DummyStorySet(
      stories if stories is not None else ['story'], **kwargs)
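# A minimal usage sketch for the constructor above (illustrative only; the
# story names and archive file below are assumptions, not values taken from
# the real test data):
#
#   benchmark = FakeBenchmark()  # Contains a single DummyStory named 'story'.
#   benchmark = FakeBenchmark(stories=['story1', 'story2'])
#   benchmark = FakeBenchmark(
#       stories=[test_stories.DummyStory('story1')],
#       archive_data_file='archive.json')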
def testArchiveWithMissingWprFile(self):
  # This test file claims to have recordings for both
  # 'http://www.testurl.com' and 'http://www.google.com'; but the file with
  # the wpr recording for the latter story is actually missing.
  archive_data_file = os.path.join(
      util.GetUnittestDataDir(), 'archive_files', 'test_missing_wpr_file.json')
  story_set = test_stories.DummyStorySet(
      ['http://www.testurl.com', 'http://www.google.com'],
      archive_data_file=archive_data_file)
  with self.assertRaises(story_runner.ArchiveError):
    story_runner._UpdateAndCheckArchives(
        story_set.archive_data_file, story_set.wpr_archive_info,
        story_set.stories, self._mock_story_filter)
def testStreamingResults(self):
  stories = test_stories.DummyStorySet(['story1', 'story2'])
  with self.CreateResults() as results:
    with results.CreateStoryRun(stories[0]):
      self._mock_time.return_value = 0.007
    expected = ('[ RUN      ] benchmark/story1\n'
                '[       OK ] benchmark/story1 (7 ms)\n')
    self.assertOutputEquals(expected)
    with results.CreateStoryRun(stories[1]):
      self._mock_time.return_value = 0.009
      results.Fail('test fails')
    expected = ('[ RUN      ] benchmark/story1\n'
                '[       OK ] benchmark/story1 (7 ms)\n'
                '[ RUN      ] benchmark/story2\n'
                '[  FAILED  ] benchmark/story2 (2 ms)\n')
    self.assertOutputEquals(expected)
def testPassAndFailedStories(self):
  stories = test_stories.DummyStorySet(
      ['story1', 'story2', 'story3', 'story4', 'story5', 'story6'])
  with self.CreateResults() as results:
    with results.CreateStoryRun(stories[0]):
      self._mock_time.return_value = 0.007
    with results.CreateStoryRun(stories[1]):
      self._mock_time.return_value = 0.009
      results.Fail('test fails')
    with results.CreateStoryRun(stories[2]):
      self._mock_time.return_value = 0.015
      results.Fail('test fails')
    with results.CreateStoryRun(stories[3]):
      self._mock_time.return_value = 0.020
    with results.CreateStoryRun(stories[4]):
      self._mock_time.return_value = 0.025
    with results.CreateStoryRun(stories[5]):
      self._mock_time.return_value = 0.030
      results.Fail('test fails')
  expected = ('[ RUN      ] benchmark/story1\n'
              '[       OK ] benchmark/story1 (7 ms)\n'
              '[ RUN      ] benchmark/story2\n'
              '[  FAILED  ] benchmark/story2 (2 ms)\n'
              '[ RUN      ] benchmark/story3\n'
              '[  FAILED  ] benchmark/story3 (6 ms)\n'
              '[ RUN      ] benchmark/story4\n'
              '[       OK ] benchmark/story4 (5 ms)\n'
              '[ RUN      ] benchmark/story5\n'
              '[       OK ] benchmark/story5 (5 ms)\n'
              '[ RUN      ] benchmark/story6\n'
              '[  FAILED  ] benchmark/story6 (5 ms)\n'
              '[  PASSED  ] 3 tests.\n'
              '[  FAILED  ] 3 tests, listed below:\n'
              '[  FAILED  ] benchmark/story2\n'
              '[  FAILED  ] benchmark/story3\n'
              '[  FAILED  ] benchmark/story6\n\n'
              '3 FAILED TESTS\n\n')
  self.assertOutputEquals(expected)
def RunStories(self, stories, **kwargs):
  story_set = test_stories.DummyStorySet(stories)
  with results_options.CreateResults(
      self.options, benchmark_name='benchmark') as results:
    story_runner.RunStorySet(
        self.mock_story_test, story_set, self.options, results, **kwargs)
def setUp(self):
  self.stories = test_stories.DummyStorySet(['foo', 'bar', 'baz'])
  self.intermediate_dir = tempfile.mkdtemp()
  self._time_module = mock.patch(
      'telemetry.internal.results.page_test_results.time').start()
  self._time_module.time.return_value = 0