Example #1
def parse_args():
    parser = argparse.ArgumentParser(description='Run browser-based performance benchmarks. To run a single benchmark in the recommended way, use run-benchmark --plan. To see the available benchmarks, use run-benchmark --list-plans.')
    mutual_group = parser.add_mutually_exclusive_group(required=True)
    mutual_group.add_argument('--plan', help='Run a specific benchmark plan (e.g. speedometer, jetstream).')
    mutual_group.add_argument('--list-plans', action='store_true', help='List all available benchmark plans.')
    mutual_group.add_argument('--allplans', action='store_true', help='Run all available benchmark plans in order.')
    mutual_group.add_argument('--read-results-json', dest='json_file', help='Instead of running a benchmark, format the output saved in JSON_FILE.')
    parser.add_argument('--output-file', default=None, help='Save detailed results to OUTPUT in JSON format. By default, results will not be saved.')
    parser.add_argument('--count', type=int, help='Number of times to run the benchmark (e.g. 5).')
    parser.add_argument('--driver', default=WebServerBenchmarkRunner.name, choices=benchmark_runner_subclasses.keys(), help='Use the specified benchmark driver. Defaults to %s.' % WebServerBenchmarkRunner.name)
    parser.add_argument('--browser', default=default_browser(), choices=BrowserDriverFactory.available_browsers(), help='Browser to run the benchmark in. Defaults to %s.' % default_browser())
    parser.add_argument('--platform', default=default_platform(), choices=BrowserDriverFactory.available_platforms(), help='Platform that this script is running on. Defaults to %s.' % default_platform())
    parser.add_argument('--local-copy', help='Path to a local copy of the benchmark (e.g. PerformanceTests/SunSpider/).')
    parser.add_argument('--device-id', default=None, help='Undocumented option for mobile device testing.')
    parser.add_argument('--debug', action='store_true', help='Enable debug logging.')
    parser.add_argument('--diagnose-directory', dest='diagnose_dir', default=None, help='Directory for storing diagnostic information on test failure. Behavior when this option is not specified is up to the browser driver implementation.')
    parser.add_argument('--no-adjust-unit', dest='scale_unit', action='store_false', help="Don't convert to scientific notation.")
    parser.add_argument('--show-iteration-values', dest='show_iteration_values', action='store_true', help="Show the measured value for each iteration in addition to averages.")

    group = parser.add_mutually_exclusive_group()
    group.add_argument('--browser-path', help='Specify the path to a non-default copy of the target browser as a path to the .app.')
    group.add_argument('--build-directory', dest='build_dir', help='Path to the browser executable (e.g. WebKitBuild/Release/).')

    args = parser.parse_args()

    if args.debug:
        _log.setLevel(logging.DEBUG)
    _log.debug('Initializing program with the following parameters')
    _log.debug('\toutput file name\t: %s', args.output_file)
    _log.debug('\tbuild directory\t: %s', args.build_dir)
    _log.debug('\tplan name\t: %s', args.plan)

    return args
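
The required mutually exclusive group above means exactly one of --plan, --list-plans, --allplans, or --read-results-json must be supplied. A minimal, self-contained sketch of that argparse pattern (the option names here are illustrative, not tied to the script):

import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--plan')
group.add_argument('--list-plans', action='store_true')

args = parser.parse_args(['--plan', 'speedometer'])   # OK: exactly one group option given
# parser.parse_args([])                               # error: one of the arguments is required
# parser.parse_args(['--plan', 'x', '--list-plans'])  # error: not allowed together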
Example #2
def parse_args():
    parser = argparse.ArgumentParser(
        description='Automate the browser-based performance benchmarks')
    # browserperfdash-specific arguments.
    parser.add_argument(
        '--config-file',
        dest='config_file',
        default=None,
        required=True,
        help='Configuration file for sending the results to the performance dashboard server(s).')
    parser.add_argument('--browser-version',
                        dest='browser_version',
                        default=None,
                        required=True,
                        help='A string that identifies the browser version.')
    # arguments shared with run-benchmark.
    parser.add_argument(
        '--build-directory',
        dest='buildDir',
        help='Path to the browser executable (e.g. WebKitBuild/Release/).')
    parser.add_argument('--platform',
                        dest='platform',
                        default=default_platform(),
                        choices=BrowserDriverFactory.available_platforms())
    parser.add_argument('--browser',
                        dest='browser',
                        default=default_browser(),
                        choices=BrowserDriverFactory.available_browsers())
    parser.add_argument(
        '--driver',
        default=WebServerBenchmarkRunner.name,
        choices=benchmark_runner_subclasses.keys(),
        help='Use the specified benchmark driver. Defaults to %s.' %
        WebServerBenchmarkRunner.name)
    parser.add_argument(
        '--local-copy',
        dest='localCopy',
        help='Path to a local copy of the benchmark (e.g. PerformanceTests/SunSpider/).')
    parser.add_argument('--count',
                        dest='countOverride',
                        type=int,
                        help='Number of times to run the benchmark (e.g. 5).')
    mutual_group = parser.add_mutually_exclusive_group(required=True)
    mutual_group.add_argument(
        '--plan',
        dest='plan',
        help='Benchmark plan to run (e.g. speedometer, jetstream).')
    mutual_group.add_argument(
        '--allplans',
        action='store_true',
        help='Run all available benchmark plans sequentially.')
    args = parser.parse_args()
    return args
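
Note how Example #2 uses dest= to decouple the command-line spelling from the attribute name on the parsed namespace (--build-directory becomes args.buildDir). A minimal sketch of that behavior, with made-up argv values:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--build-directory', dest='buildDir')
parser.add_argument('--count', dest='countOverride', type=int)

args = parser.parse_args(['--build-directory', 'WebKitBuild/Release/', '--count', '5'])
print(args.buildDir)       # WebKitBuild/Release/
print(args.countOverride)  # 5, converted by type=int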
Example #3
def __init__(self, plan_file, local_copy, count_override, build_dir, output_file, platform, browser, browser_path, scale_unit=True, show_iteration_values=False, device_id=None, diagnose_dir=None):
    try:
        plan_file = self._find_plan_file(plan_file)
        with open(plan_file, 'r') as fp:
            self._plan_name = os.path.split(os.path.splitext(plan_file)[0])[1]
            self._plan = json.load(fp)
            if 'options' not in self._plan:
                self._plan['options'] = {}
            if local_copy:
                self._plan['local_copy'] = local_copy
            if count_override:
                self._plan['count'] = count_override
            self._browser_driver = BrowserDriverFactory.create(platform, browser)
            self._browser_path = browser_path
            self._build_dir = os.path.abspath(build_dir) if build_dir else None
            self._diagnose_dir = os.path.abspath(diagnose_dir) if diagnose_dir else None
            self._output_file = output_file
            self._scale_unit = scale_unit
            self._show_iteration_values = show_iteration_values
            self._config = self._plan.get('config', {})
            if device_id:
                self._config['device_id'] = device_id
    except IOError as error:
        _log.error('Cannot open plan file: {plan_file} - Error: {error}'.format(plan_file=plan_file, error=error))
        raise error
    except ValueError as error:
        _log.error('Plan file {plan_file} may not be valid JSON - Error: {error}'.format(plan_file=plan_file, error=error))
        raise error
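
The constructor derives the plan name from the plan file's base name and fills in defaults for missing keys. A standalone sketch of that loading pattern, using a hypothetical plan file written to a temporary path (real plans are resolved by self._find_plan_file):

import json
import os
import tempfile

# Hypothetical plan file, created here purely for illustration.
plan_file = os.path.join(tempfile.mkdtemp(), 'speedometer.plan')
with open(plan_file, 'w') as fp:
    json.dump({'count': 3}, fp)

with open(plan_file, 'r') as fp:
    plan_name = os.path.split(os.path.splitext(plan_file)[0])[1]  # 'speedometer'
    plan = json.load(fp)
    if 'options' not in plan:
        plan['options'] = {}

print(plan_name, plan)  # speedometer {'count': 3, 'options': {}}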
Example #4
        pass

    def restore_env_after_all_testing(self):
        pass

    def close_browsers(self):
        pass

    def launch_url(self, url, options, browser_build_path, browser_path):
        pass

    def launch_webdriver(self, url, driver):
        pass


BrowserDriverFactory.add_browser_driver("fake", None, FakeBrowserDriver)


class FakeBenchmarkRunner(BenchmarkRunner):
    name = 'fake'

    def __init__(self, plan_file, local_copy, count_override, build_dir,
                 output_file, platform, browser, browser_path):
        super(FakeBenchmarkRunner, self).__init__(
            plan_file, local_copy, count_override, build_dir,
            output_file, platform, browser, browser_path)

    def execute(self):
        return True
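
FakeBrowserDriver and FakeBenchmarkRunner are test stubs: every browser hook is a no-op so unit tests can exercise the runner machinery without launching a real browser. A self-contained sketch of the same stub pattern (the class names here are invented, not the script's API; only launch_url mirrors the signature shown above):

class BrowserDriver(object):
    def launch_url(self, url, options, browser_build_path, browser_path):
        raise NotImplementedError


class NoOpBrowserDriver(BrowserDriver):
    def launch_url(self, url, options, browser_build_path, browser_path):
        pass  # deliberately do nothing; tests never touch a real browser


driver = NoOpBrowserDriver()
driver.launch_url('http://localhost:8000/', None, None, None)  # safe no-op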