def test_can_construct_runner_object_minimum_parameters(self):
    # Building the benchmark runner with only the required parameters
    # must succeed, and the constructed runner must execute cleanly.
    available = BenchmarkRunner.available_plans()
    current_build_dir = os.path.abspath(os.curdir)
    fake_runner = FakeBenchmarkRunner(available[0], False, 1,
                                      current_build_dir,
                                      "/tmp/testOutput.txt", "fake",
                                      None, None)
    self.assertTrue(fake_runner.execute())
# Example #2
def start(args):
    """Command-line entry point.

    Depending on *args*: show previously-recorded results from a JSON
    file, run every available plan (honoring the 'Skipped' file), list
    the available plans, or run the single plan named on the command
    line.  In all-plans mode the return value is the number of failed
    plans (0 means success); the other modes return None.
    """
    if args.json_file:
        # Display stored results instead of running anything.
        # Use a context manager so the file handle is not leaked.
        with open(args.json_file, 'r') as json_fd:
            results_json = json.load(json_fd)
        # Internal debug output is not part of the displayed results.
        if 'debugOutput' in results_json:
            del results_json['debugOutput']
        BenchmarkRunner.show_results(results_json, args.scale_unit,
                                     args.show_iteration_values)
        return
    if args.allplans:
        failed = []
        skipped = []
        planlist = BenchmarkRunner.available_plans()
        skippedfile = os.path.join(BenchmarkRunner.plan_directory(), 'Skipped')
        if not planlist:
            raise Exception('Cant find any .plan file in directory %s' %
                            BenchmarkRunner.plan_directory())
        # Plans listed in the optional 'Skipped' file (one name per line,
        # '#' starts a comment) are excluded from the run.
        if os.path.isfile(skippedfile):
            with open(skippedfile) as skipped_fd:
                skipped = [
                    line.strip() for line in skipped_fd
                    if not line.startswith('#') and len(line) > 1
                ]
        for plan in sorted(planlist):
            if plan in skipped:
                _log.info(
                    'Skipping benchmark plan: %s because is listed on the Skipped file'
                    % plan)
                continue
            _log.info('Starting benchmark plan: %s' % plan)
            try:
                run_benchmark_plan(args, plan)
                _log.info('Finished benchmark plan: %s' % plan)
            except KeyboardInterrupt:
                raise
            # Narrowed from a bare except: so SystemExit/GeneratorExit
            # still propagate; a failing plan must not stop the others.
            except Exception:
                failed.append(plan)
                _log.exception('Error running benchmark plan: %s' % plan)
        if failed:
            _log.error('The following benchmark plans have failed: %s' %
                       failed)
        # The count of failed plans doubles as the exit status.
        return len(failed)
    if args.list_plans:
        list_benchmark_plans()
        return

    run_benchmark_plan(args, args.plan)
# Example #3
 def __init__(self, args):
     """Keep the parsed CLI arguments, locate the plan directory and
     prepare the result payload expected by browserperfdash."""
     self._args = args
     self._plandir = os.path.abspath(BenchmarkRunner.plan_directory())
     if not os.path.isdir(self._plandir):
         raise Exception('Cant find plandir: {plandir}'.format(plandir=self._plandir))
     self._parse_config_file(self._args.config_file)
     # HTTP POST payload that browserperfdash expects (see
     # https://github.com/Igalia/browserperfdash/blob/master/docs/design-document.md):
     #   - bot_* fields come from the config file,
     #   - browser_* fields come from the command-line arguments,
     #   - test_* fields are filled in after each test run.
     self._result_data = dict(bot_id=None,
                              bot_password=None,
                              browser_id=self._args.browser,
                              browser_version=self._args.browser_version,
                              test_id=None,
                              test_version=None,
                              test_data=None)
# Example #4
def list_benchmark_plans():
    """Print the name of every available benchmark plan, one per line."""
    print("Available benchmark plans: ")
    for plan_name in BenchmarkRunner.available_plans():
        print("\t%s" % plan_name)
# Example #5
    def run(self):
        """Run the selected benchmark plan(s) and upload their results.

        Selects either the single plan given on the command line or every
        available plan (minus those in the 'Skipped' file), runs each one
        through the configured driver, and uploads the results to the
        dashboard server(s).

        Returns 0 when everything uploaded, 1 when there was nothing to
        run, otherwise the number of plans that failed to run or upload.
        """
        failed = []
        worked = []
        skipped = []
        planlist = []
        if self._args.plan:
            # Single-plan mode: the named .plan file must exist.
            if not os.path.isfile(
                    os.path.join(
                        self._plandir,
                        '{plan_name}.plan'.format(plan_name=self._args.plan))):
                raise Exception(
                    'Cant find a file named {plan_name}.plan in directory {plan_directory}'
                    .format(plan_name=self._args.plan,
                            plan_directory=self._plandir))
            planlist = [self._args.plan]
        elif self._args.allplans:
            planlist = BenchmarkRunner.available_plans()
            skippedfile = os.path.join(self._plandir, 'Skipped')
            if not planlist:
                raise Exception(
                    'Cant find any plan in the directory {plan_directory}'.
                    format(plan_directory=self._plandir))
            # Plans listed in the optional 'Skipped' file (one name per
            # line, '#' starts a comment) are excluded from the run.
            # Read with a context manager so the handle is not leaked.
            if os.path.isfile(skippedfile):
                with open(skippedfile) as skipped_fd:
                    skipped = [
                        line.strip() for line in skipped_fd
                        if not line.startswith('#') and len(line) > 1
                    ]

        if len(planlist) < 1:
            _log.error(
                'No benchmarks plans available to run in directory {plan_directory}'
                .format(plan_directory=self._plandir))
            return 1

        _log.info(
            'Starting benchmark for browser {browser} and version {browser_version}'
            .format(browser=self._args.browser,
                    browser_version=self._args.browser_version))

        # enumerate() replaces the original hand-maintained counter.
        for iteration_count, plan in enumerate(sorted(planlist), 1):
            if plan in skipped:
                _log.info(
                    'Skipping benchmark plan: {plan_name} because is listed on the Skipped file [benchmark {iteration} of {total}]'
                    .format(plan_name=plan,
                            iteration=iteration_count,
                            total=len(planlist)))
                continue
            _log.info(
                'Starting benchmark plan: {plan_name} [benchmark {iteration} of {total}]'
                .format(plan_name=plan,
                        iteration=iteration_count,
                        total=len(planlist)))
            try:
                # Run the test and capture its results into a temporary
                # file that is removed automatically afterwards.
                with tempfile.NamedTemporaryFile() as temp_result_file:
                    benchmark_runner_class = benchmark_runner_subclasses[
                        self._args.driver]
                    runner = benchmark_runner_class(
                        plan, self._args.localCopy,
                        self._args.timeoutFactorOverride,
                        self._args.countOverride, self._args.buildDir,
                        temp_result_file.name, self._args.platform,
                        self._args.browser)
                    runner.execute()
                    _log.info('Finished benchmark plan: {plan_name}'.format(
                        plan_name=plan))
                    # Fill test info for upload
                    self._result_data['test_id'] = plan
                    self._result_data[
                        'test_version'] = self._get_test_version_string(plan)
                    # Fill obtained test results for upload
                    self._result_data[
                        'test_data'] = self._get_test_data_json_string(
                            temp_result_file)

                # Now upload data to server(s)
                _log.info(
                    'Uploading results for plan: {plan_name} and browser {browser} version {browser_version}'
                    .format(plan_name=plan,
                            browser=self._args.browser,
                            browser_version=self._args.browser_version))
                if self._upload_result():
                    worked.append(plan)
                else:
                    failed.append(plan)

            except KeyboardInterrupt:
                raise
            # Narrowed from a bare except: so SystemExit/GeneratorExit
            # still propagate; a failing plan must not stop the others.
            except Exception:
                failed.append(plan)
                _log.exception(
                    'Error running benchmark plan: {plan_name}'.format(
                        plan_name=plan))

        if len(worked) > 0:
            _log.info(
                'The following benchmark plans have been upload succesfully: {list_plan_worked}'
                .format(list_plan_worked=worked))

        if len(failed) > 0:
            _log.error(
                'The following benchmark plans have failed to run or to upload: {list_plan_failed}'
                .format(list_plan_failed=failed))
            return len(failed)

        return 0
# Example #6
 def test_list_plans_at_least_five(self):
     # The runner is expected to ship with at least five benchmark plans.
     available = BenchmarkRunner.available_plans()
     self.assertTrue(len(available) > 4)