def parse_args():
    """Parse command-line options for the benchmark automation script.

    Exactly one of --read-results-json (format an existing results file) or
    --plan (run a benchmark) must be supplied.  Returns the populated
    argparse.Namespace.
    """
    arg_parser = argparse.ArgumentParser(description='Automate the browser based performance benchmarks')
    arg_parser.add_argument('--output-file', dest='output', default=None)
    arg_parser.add_argument('--build-directory', dest='buildDir', help='Path to the browser executable. e.g. WebKitBuild/Release/')
    arg_parser.add_argument('--platform', dest='platform', default='osx', choices=BrowserDriverFactory.available_platforms())
    # FIXME: Should we add chrome as an option? Well, chrome uses webkit in iOS.
    arg_parser.add_argument('--browser', dest='browser', default='safari', choices=BrowserDriverFactory.available_browsers())
    arg_parser.add_argument('--debug', action='store_true')
    arg_parser.add_argument('--local-copy', dest='localCopy', help='Path to a local copy of the benchmark. e.g. PerformanceTests/SunSpider/')
    arg_parser.add_argument('--count', dest='countOverride', type=int, help='Number of times to run the benchmark. e.g. 5')
    arg_parser.add_argument('--device-id', dest='device_id', default=None)
    arg_parser.add_argument('--no-adjust-unit', dest='scale_unit', action='store_false')
    # The two operating modes are mutually exclusive and one is required.
    exclusive = arg_parser.add_mutually_exclusive_group(required=True)
    exclusive.add_argument('--read-results-json', dest='json_file', help='Specify file you want to format')
    exclusive.add_argument('--plan', dest='plan', help='Benchmark plan to run. e.g. speedometer, jetstream')

    parsed = arg_parser.parse_args()
    if parsed.debug:
        _log.setLevel(logging.DEBUG)
    _log.debug('Initializing program with following parameters')
    _log.debug('\toutput file name\t: %s' % parsed.output)
    _log.debug('\tbuild directory\t: %s' % parsed.buildDir)
    _log.debug('\tplan name\t: %s', parsed.plan)
    return parsed
def parse_args():
    """Parse run-benchmark's command-line options.

    Exactly one primary mode is required: --plan, --list-plans, --allplans,
    or --read-results-json.  All other options refine that mode.  Returns the
    populated argparse.Namespace.
    """
    # Fixed typo: "vailable" -> "available".
    parser = argparse.ArgumentParser(description='Run browser based performance benchmarks. To run a single benchmark in the recommended way, use run-benchmark --plan. To see the available benchmarks, use run-benchmark --list-plans.')
    # Exactly one of these primary modes must be chosen.
    mutual_group = parser.add_mutually_exclusive_group(required=True)
    mutual_group.add_argument('--plan', help='Run a specific benchmark plan (e.g. speedometer, jetstream).')
    mutual_group.add_argument('--list-plans', action='store_true', help='List all available benchmark plans.')
    mutual_group.add_argument('--allplans', action='store_true', help='Run all available benchmark plans in order.')
    mutual_group.add_argument('--read-results-json', dest='json_file', help='Instead of running a benchmark, format the output saved in JSON_FILE.')
    parser.add_argument('--output-file', default=None, help='Save detailed results to OUTPUT in JSON format. By default, results will not be saved.')
    parser.add_argument('--count', type=int, help='Number of times to run the benchmark (e.g. 5).')
    parser.add_argument('--driver', default=WebServerBenchmarkRunner.name, choices=benchmark_runner_subclasses.keys(), help='Use the specified benchmark driver. Defaults to %s.' % WebServerBenchmarkRunner.name)
    # Fixed typo: "nechmark" -> "benchmark".
    parser.add_argument('--browser', default=default_browser(), choices=BrowserDriverFactory.available_browsers(), help='Browser to run the benchmark in. Defaults to %s.' % default_browser())
    parser.add_argument('--platform', default=default_platform(), choices=BrowserDriverFactory.available_platforms(), help='Platform that this script is running on. Defaults to %s.' % default_platform())
    parser.add_argument('--local-copy', help='Path to a local copy of the benchmark (e.g. PerformanceTests/SunSpider/).')
    parser.add_argument('--device-id', default=None, help='Undocumented option for mobile device testing.')
    parser.add_argument('--debug', action='store_true', help='Enable debug logging.')
    parser.add_argument('--no-adjust-unit', dest='scale_unit', action='store_false', help="Don't convert to scientific notation.")
    parser.add_argument('--show-iteration-values', dest='show_iteration_values', action='store_true', help="Show the measured value for each iteration in addition to averages.")
    # A custom browser location can be given either as an .app path or a
    # build directory, but not both.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--browser-path', help='Specify the path to a non-default copy of the target browser as a path to the .app.')
    group.add_argument('--build-directory', dest='build_dir', help='Path to the browser executable (e.g. WebKitBuild/Release/).')

    args = parser.parse_args()
    if args.debug:
        _log.setLevel(logging.DEBUG)
    _log.debug('Initializing program with following parameters')
    _log.debug('\toutput file name\t: %s' % args.output_file)
    _log.debug('\tbuild directory\t: %s' % args.build_dir)
    _log.debug('\tplan name\t: %s', args.plan)
    return args
def parse_args():
    """Parse command-line options for the benchmark automation script.

    One of --read-results-json, --plan, or --allplans is required; they are
    mutually exclusive.  Returns the populated argparse.Namespace.
    """
    arg_parser = argparse.ArgumentParser(description='Automate the browser based performance benchmarks')
    arg_parser.add_argument('--output-file', dest='output', default=None)
    arg_parser.add_argument('--build-directory', dest='buildDir', help='Path to the browser executable. e.g. WebKitBuild/Release/')
    arg_parser.add_argument('--platform', dest='platform', default='osx', choices=BrowserDriverFactory.available_platforms())
    # FIXME: Should we add chrome as an option? Well, chrome uses webkit in iOS.
    arg_parser.add_argument('--browser', dest='browser', default='safari', choices=BrowserDriverFactory.available_browsers())
    arg_parser.add_argument('--debug', action='store_true')
    arg_parser.add_argument('--local-copy', dest='localCopy', help='Path to a local copy of the benchmark. e.g. PerformanceTests/SunSpider/')
    arg_parser.add_argument('--count', dest='countOverride', type=int, help='Number of times to run the benchmark. e.g. 5')
    arg_parser.add_argument('--device-id', dest='device_id', default=None)
    arg_parser.add_argument('--no-adjust-unit', dest='scale_unit', action='store_false')
    # Exactly one of the three operating modes must be chosen.
    exclusive = arg_parser.add_mutually_exclusive_group(required=True)
    exclusive.add_argument('--read-results-json', dest='json_file', help='Specify file you want to format')
    exclusive.add_argument('--plan', dest='plan', help='Benchmark plan to run. e.g. speedometer, jetstream')
    exclusive.add_argument('--allplans', action='store_true', help='Run all available benchmark plans sequentially')

    parsed = arg_parser.parse_args()
    if parsed.debug:
        _log.setLevel(logging.DEBUG)
    _log.debug('Initializing program with following parameters')
    _log.debug('\toutput file name\t: %s' % parsed.output)
    _log.debug('\tbuild directory\t: %s' % parsed.buildDir)
    _log.debug('\tplan name\t: %s', parsed.plan)
    return parsed
def parse_args():
    """Parse command-line options for the benchmark automation script.

    --plan, --platform, and --browser are all required in this revision.
    Returns the populated argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description='Automate the browser based performance benchmarks')
    parser.add_argument('--output-file', dest='output', default=None)
    parser.add_argument('--build-directory', dest='buildDir', help='Path to the browser executable. e.g. WebKitBuild/Release/')
    parser.add_argument('--plan', dest='plan', required=True, help='Benchmark plan to run. e.g. speedometer, jetstream')
    parser.add_argument('--platform', dest='platform', required=True, choices=BrowserDriverFactory.available_platforms())
    # FIXME: Should we add chrome as an option? Well, chrome uses webkit in iOS.
    parser.add_argument('--browser', dest='browser', required=True, choices=BrowserDriverFactory.available_browsers())
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--local-copy', dest='localCopy', help='Path to a local copy of the benchmark. e.g. PerformanceTests/SunSpider/')
    parser.add_argument('--count', dest='countOverride', type=int, help='Number of times to run the benchmark. e.g. 5')
    # Fixed grammar in help text: "you wants" -> "you want".
    parser.add_argument('--http-server-driver', dest='httpServerDriverOverride', default=None, help='Specify which HTTP server you want to use')
    parser.add_argument('--device-id', dest='device_id', default=None)

    args = parser.parse_args()
    if args.debug:
        _log.setLevel(logging.DEBUG)
    _log.debug('Initializing program with following parameters')
    _log.debug('\toutput file name\t: %s' % args.output)
    _log.debug('\tbuild directory\t: %s' % args.buildDir)
    _log.debug('\tplan name\t: %s', args.plan)
    return args