# Example #1
 def testRunPerformanceTestsTelemetryCommandGenerator_StorySelectionConfig_Abridged(self):
   """Checks that an abridged story selection config adds the CLI flag.

   When the story selection config carries 'abridged': True, the generated
   telemetry command must include --run-abridged-story-set.
   """
   parsed_options = run_performance_tests.parse_arguments([
       '../../tools/perf/run_benchmark', '--browser=release_x64',
       '--run-ref-build',
       r'--isolated-script-test-output=c:\a\b\c\output.json',
   ])
   abridged_config = {
       'abridged': True,
   }
   generator = run_performance_tests.TelemetryCommandGenerator(
       'fake_benchmark_name', parsed_options, abridged_config)
   generated_command = generator.generate('fake_output_dir')
   self.assertIn('--run-abridged-story-set', generated_command)
 def testRunPerformanceTestsGtestArgsParser(self):
   """Verifies argument parsing for a non-telemetry (gtest) benchmark.

   Flags the parser does not consume must be forwarded via
   passthrough_args, while the executable name and the isolated-script
   output path are captured on dedicated options.
   """
   # NOTE(review): body re-indented to the 3-space convention used by the
   # sibling methods; the original mixed 4- and 5-space indents.
   options = run_performance_tests.parse_arguments([
       'media_perftests', '--non-telemetry=true', '--single-process-tests',
       '--test-launcher-retry-limit=0',
       '--isolated-script-test-filter=*::-*_unoptimized::*_unaligned::'
       '*unoptimized_aligned',
       '--gtest-benchmark-name', 'media_perftests',
       '--isolated-script-test-output=/x/y/z/output.json',
   ])
   # Unconsumed flags are passed through to the benchmark untouched.
   self.assertIn('--single-process-tests', options.passthrough_args)
   self.assertIn('--test-launcher-retry-limit=0', options.passthrough_args)
   self.assertEqual(options.executable, 'media_perftests')
   self.assertEqual(options.isolated_script_test_output,
                    r'/x/y/z/output.json')
 def testRunPerformanceTestsTelemetryCommandGenerator_ReferenceBrowserComeLast(self):
   """This tests for crbug.com/928928.

   For a reference run, --browser=reference must appear after the
   original --browser argument so that it takes precedence.
   """
   options = run_performance_tests.parse_arguments([
       '../../tools/perf/run_benchmark', '--browser=release_x64',
       '--run-ref-build',
       '--test-shard-map-filename=win-10-perf_map.json',
       r'--isolated-script-test-output=c:\a\b\c\output.json',
   ])
   self.assertIn('--browser=release_x64', options.passthrough_args)
   command = run_performance_tests.TelemetryCommandGenerator(
       'fake_benchmark_name', options, is_reference=True).generate(
           'fake_output_dir')
   original_browser_arg_index = command.index('--browser=release_x64')
   reference_browser_arg_index = command.index('--browser=reference')
   # assertGreater reports both indices on failure, unlike
   # assertTrue(a > b), which only prints "False is not true".
   self.assertGreater(reference_browser_arg_index, original_browser_arg_index)
 def testRunPerformanceTestsTelemetryArgsParser(self):
   """Exercises argument parsing for a telemetry benchmark invocation."""
   parsed = run_performance_tests.parse_arguments([
       '../../tools/perf/run_benchmark', '-v', '--browser=release_x64',
       '--upload-results', '--run-ref-build',
       '--test-shard-map-filename=win-10-perf_map.json',
       '--assert-gpu-compositing',
       r'--isolated-script-test-output=c:\a\b\c\output.json',
       r'--isolated-script-test-perf-output=c:\a\b\c\perftest-output.json',
       '--passthrough-arg=--a=b',
   ])
   # Flags the parser does not own are forwarded to the benchmark.
   for forwarded_flag in ('--assert-gpu-compositing', '--browser=release_x64',
                          '-v', '--a=b'):
     self.assertIn(forwarded_flag, parsed.passthrough_args)
   self.assertEqual(parsed.executable, '../../tools/perf/run_benchmark')
   self.assertEqual(parsed.isolated_script_test_output,
                    r'c:\a\b\c\output.json')
 def setUp(self):
   """Builds a minimal parsed-options fixture shared by the tests.

   NOTE(review): body re-indented to the 3-space convention used by the
   other methods; the original used a 5-space body indent.
   """
   fake_args = [
       './run_benchmark', '--isolated-script-test-output=output.json'
   ]
   self._fake_options = run_performance_tests.parse_arguments(fake_args)