def testRunBenchmark_TooManyArgs(self):
    """Extra positional args beyond the benchmark name cause a parse error."""
    argv = ['run', 'example_benchmark', 'other', '--browser=beta', 'args']
    with self.assertRaises(ParserError):
        parser.ParseArgs(self.mock_config, argv)
    self._optparse_error.assert_called_with(
        'unrecognized arguments: other args')
def testRunBenchmark_UnknownArg(self):
    """An option the parser does not recognize causes a parse error."""
    argv = ['run', 'example_benchmark', '--non-existent-option']
    with self.assertRaises(ParserError):
        parser.ParseArgs(self.mock_config, argv)
    self._optparse_error.assert_called_with(
        'no such option: --non-existent-option')
def testRunBenchmark_WithExternalHelp(self):
    """Test `run --help` message includes both our and external options."""
    external = argparse.ArgumentParser(add_help=False)
    external.add_argument('--extra-special-option', action='store_true')
    with self.assertRaises(ParserExit):
        parser.ParseArgs(self.config, ['run', '--help'],
                         results_arg_parser=external)
    help_text = sys.stdout.getvalue()
    self.assertIn('--browser=BROWSER_TYPE', help_text)
    self.assertIn('--extra-special-option', help_text)
def testListBenchmarks_NoExternalOptions(self):
    """The `list` command rejects options from the external parser."""
    external = argparse.ArgumentParser()
    external.add_argument('--extra-special-option', action='store_true')
    # Listing benchmarks does not require the external results processor.
    with self.assertRaises(ParserError):
        parser.ParseArgs(self.config, ['list', '--extra-special-option'],
                         results_arg_parser=external)
    self._optparse_error.assert_called_with(
        'no such option: --extra-special-option')
def testListBenchmarks_WithExternalHelp(self):
    """Test `list --help` message does not include external options."""
    external = argparse.ArgumentParser(add_help=False)
    external.add_argument('--extra-special-option', action='store_true')
    with self.assertRaises(ParserExit):
        parser.ParseArgs(self.mock_config, ['list', '--help'],
                         results_arg_parser=external)
    help_text = sys.stdout.getvalue()
    self.assertIn('--browser=BROWSER_TYPE', help_text)
    self.assertNotIn('--extra-special-option', help_text)
def testRunBenchmark_ExternalOption(self):
    """Options registered on the external parser are accepted by `run`."""
    external = argparse.ArgumentParser(add_help=False)
    external.add_argument('--extra-special-option', action='store_true')
    parsed = parser.ParseArgs(
        self.mock_config,
        ['run', 'example_benchmark', '--extra-special-option'],
        results_arg_parser=external)
    self.assertEqual(parsed.command, 'run')
    self.assertEqual(parsed.positional_args, ['example_benchmark'])
    self.assertTrue(parsed.extra_special_option)
def testRunBenchmark_WithCustomOptionDefaults(self):
    """A benchmark's `options` dict overrides the parser's own defaults."""
    class CustomDefaultsBenchmark(benchmark.Benchmark):
        options = {'upload_results': True}

        @classmethod
        def Name(cls):
            return 'custom_benchmark'

    self.benchmarks.append(CustomDefaultsBenchmark)
    parsed = parser.ParseArgs(
        self.mock_config, ['custom_benchmark', '--browser', 'stable'])
    self.assertTrue(parsed.upload_results)
    self.assertEqual(parsed.positional_args, ['custom_benchmark'])
def testRunCommandIsDefault(self):
    """Omitting the command name makes `run` the implied command."""
    parsed = parser.ParseArgs(self.mock_config,
                              ['example_benchmark', '--browser', 'stable'])
    self.assertEqual(parsed.command, 'run')
    self.assertEqual(parsed.positional_args, ['example_benchmark'])
    self.assertEqual(parsed.browser_type, 'stable')
def testListBenchmarks(self):
    """The `list` command parses its --json output option."""
    parsed = parser.ParseArgs(self.mock_config,
                              ['list', '--json', 'output.json'])
    self.assertEqual(parsed.command, 'list')
    self.assertEqual(parsed.json_filename, 'output.json')
def testRunBenchmarkHelp(self):
    """`--help` after a benchmark name prints usage and exits."""
    with self.assertRaises(ParserExit):
        parser.ParseArgs(self.mock_config, ['example_benchmark', '--help'])
    help_text = sys.stdout.getvalue()
    self.assertIn('--browser=BROWSER_TYPE', help_text)
def testHelpCommand(self):
    """`help <command>` prints command help and exits."""
    with self.assertRaises(ParserExit):
        parser.ParseArgs(self.mock_config, ['help', 'run'])
    help_text = sys.stdout.getvalue()
    self.assertIn('To get help about a command use', help_text)
def testHelpFlag(self):
    """A bare `--help` prints the tool description and exits."""
    with self.assertRaises(ParserExit):
        parser.ParseArgs(self.mock_config, ['--help'])
    help_text = sys.stdout.getvalue()
    self.assertIn('Command line tool to run performance benchmarks.',
                  help_text)
def testRunHelp(self):
    """`run --help` prints the run command's options and exits."""
    with self.assertRaises(ParserExit):
        parser.ParseArgs(self.config, ['run', '--help'])
    help_text = sys.stdout.getvalue()
    self.assertIn('--browser=BROWSER_TYPE', help_text)
def testRunBenchmark_MissingBenchmark(self):
    """`run` with no benchmark name causes a parse error."""
    with self.assertRaises(ParserError):
        parser.ParseArgs(self.mock_config, ['run', '--browser=stable'])
    self._optparse_error.assert_called_with(
        'missing required argument: benchmark_name')
def testRunBenchmark_UnknownBenchmark(self):
    """`run` with a benchmark name that is not registered causes an error."""
    argv = ['run', 'foo.benchmark', '--browser=stable']
    with self.assertRaises(ParserError):
        parser.ParseArgs(self.mock_config, argv)
    self._optparse_error.assert_called_with(
        'no such benchmark: foo.benchmark')
def testRunCommandBenchmarkNameAtEnd(self):
    """The benchmark name may follow the options on the command line."""
    parsed = parser.ParseArgs(
        self.config, ['--browser', 'stable', 'tbm_sample.tbm_sample'])
    self.assertEqual(parsed.command, 'run')
    self.assertEqual(parsed.positional_args, ['tbm_sample.tbm_sample'])
    self.assertEqual(parsed.browser_type, 'stable')
def testRunCommandIsDefault(self):
    """With no explicit command, arguments are parsed as a `run` command."""
    parsed = parser.ParseArgs(
        self.config, ['tbm_sample.tbm_sample', '--browser', 'stable'])
    self.assertEqual(parsed.command, 'run')
    self.assertEqual(parsed.positional_args, ['tbm_sample.tbm_sample'])
    self.assertEqual(parsed.browser_type, 'stable')