Example #1
    def _prepare_config(self, options, args, tool):
        results_directory = args[0]
        host = Host()
        host.initialize_scm()

        print 'Parsing full_results.json...'
        results_json_path = host.filesystem.join(results_directory, 'full_results.json')
        results_json = json_results_generator.load_json(host.filesystem, results_json_path)

        port = tool.port_factory.get()
        layout_tests_directory = port.layout_tests_dir()
        platforms = host.filesystem.listdir(host.filesystem.join(layout_tests_directory, 'platform'))
        self._test_config = TestConfig(port, layout_tests_directory, results_directory, platforms, host)

        print 'Gathering current baselines...'
        self._gather_baselines(results_json)

        return {
            'test_config': self._test_config,
            "results_json": results_json,
            "platforms_json": {
                'platforms': platforms,
                'defaultPlatform': port.name(),
            },
        }
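
For reference, a minimal sketch of the dictionary this _prepare_config returns. Only the key names come from the code above; the platform names and values below are assumptions for illustration.

# Illustrative shape only (hypothetical values): 'mac' and 'mac-wk2' stand in for
# directories found under LayoutTests/platform, and 'mac' for port.name().
example_config = {
    'test_config': None,      # the TestConfig instance built above
    'results_json': {},       # parsed contents of full_results.json
    'platforms_json': {
        'platforms': ['mac', 'mac-wk2'],
        'defaultPlatform': 'mac',
    },
}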
Example #3
    def main(self):
        args = sys.argv[1:]

        host = Host()
        host.initialize_scm()

        stderr = self._engage_awesome_stderr_hacks()

        # Checking for the verbose flag before calling check_webkit_style_parser()
        # lets us enable verbose logging earlier.
        is_verbose = "-v" in args or "--verbose" in args

        checker.configure_logging(stream=stderr, is_verbose=is_verbose)
        _log.debug("Verbose logging enabled.")

        parser = checker.check_webkit_style_parser()
        (paths, options) = parser.parse(args)

        configuration = checker.check_webkit_style_configuration(options)

        paths = change_directory(host.filesystem,
                                 checkout_root=host.scm().checkout_root,
                                 paths=paths)

        style_processor = StyleProcessor(configuration)
        file_reader = TextFileReader(host.filesystem, style_processor)

        if paths and not options.diff_files:
            file_reader.process_paths(paths)
            file_reader.do_association_check(host.scm().checkout_root)
        else:
            changed_files = paths if options.diff_files else None
            patch = host.scm().create_patch(options.git_commit,
                                            changed_files=changed_files,
                                            git_index=options.git_index)
            patch_checker = PatchReader(file_reader)
            patch_checker.check(patch)

        error_count = style_processor.error_count
        file_count = file_reader.file_count
        delete_only_file_count = file_reader.delete_only_file_count

        _log.info("Total errors found: %d in %d files" %
                  (error_count, file_count))
        # We fail when style errors are found or there are no checked files.
        return error_count > 0 or (file_count == 0
                                   and delete_only_file_count == 0)
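
If it helps to see how the boolean returned by main() above is typically consumed, here is a hypothetical wrapper; the tool object and its construction are not shown in the example, so the function and parameter names below are assumed.

import sys

def run_style_checker(tool):
    # Hypothetical driver: main() returns a truthy value when style errors are
    # found (or when nothing was checked), which maps onto a non-zero exit status.
    sys.exit(1 if tool.main() else 0)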
def get_test_baselines(test_file, test_config):
    # FIXME: This seems like a hack. This only seems to be used to access the Port.expected_baselines logic.
    class AllPlatformsPort(Port):
        def __init__(self, host):
            super(AllPlatformsPort, self).__init__(host, 'mac')
            self._platforms_by_directory = dict([
                (self._webkit_baseline_path(p), p)
                for p in test_config.platforms
            ])

        def baseline_search_path(self):
            return self._platforms_by_directory.keys()

        def platform_from_directory(self, directory):
            return self._platforms_by_directory[directory]

    test_path = test_config.filesystem.join(test_config.layout_tests_directory,
                                            test_file)

    # FIXME: This should get the Host from the test_config to be mockable!
    host = Host()
    host.initialize_scm()
    host.filesystem = test_config.filesystem
    all_platforms_port = AllPlatformsPort(host)

    all_test_baselines = {}
    for baseline_extension in ('.txt', '.checksum', '.png'):
        test_baselines = test_config.test_port.expected_baselines(
            test_file, baseline_extension)
        baselines = all_platforms_port.expected_baselines(test_file,
                                                          baseline_extension,
                                                          all_baselines=True)
        for platform_directory, expected_filename in baselines:
            if not platform_directory:
                continue
            if platform_directory == test_config.layout_tests_directory:
                platform = 'base'
            else:
                platform = all_platforms_port.platform_from_directory(
                    platform_directory)
            platform_baselines = all_test_baselines.setdefault(platform, {})
            was_used_for_test = (platform_directory,
                                 expected_filename) in test_baselines
            platform_baselines[baseline_extension] = was_used_for_test

    return all_test_baselines
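
To make the return shape concrete, here is a sketch of what get_test_baselines might produce for one test; the platform names and booleans are illustrative, the structure follows the loop above.

# Keys are platform names ('base' means the baseline lives directly under
# LayoutTests); each value maps a baseline extension to whether that baseline
# is the one the test port actually used for this test.
example_baselines = {
    'base': {'.txt': True},
    'mac': {'.png': True, '.checksum': False},
}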
Example #5
    def main(self):
        args = sys.argv[1:]

        host = Host()
        host.initialize_scm()

        stderr = self._engage_awesome_stderr_hacks()

        # Checking for the verbose flag before calling check_webkit_style_parser()
        # lets us enable verbose logging earlier.
        is_verbose = "-v" in args or "--verbose" in args

        checker.configure_logging(stream=stderr, is_verbose=is_verbose)
        _log.debug("Verbose logging enabled.")

        parser = checker.check_webkit_style_parser()
        (paths, options) = parser.parse(args)

        configuration = checker.check_webkit_style_configuration(options)

        paths = change_directory(host.filesystem, checkout_root=host.scm().checkout_root, paths=paths)

        style_processor = StyleProcessor(configuration)
        file_reader = TextFileReader(host.filesystem, style_processor)

        if paths and not options.diff_files:
            file_reader.process_paths(paths)
        else:
            changed_files = paths if options.diff_files else None
            patch = host.scm().create_patch(options.git_commit, changed_files=changed_files)
            patch_checker = PatchReader(file_reader)
            patch_checker.check(patch)

        error_count = style_processor.error_count
        file_count = file_reader.file_count
        delete_only_file_count = file_reader.delete_only_file_count

        _log.info("Total errors found: %d in %d files" % (error_count, file_count))
        # We fail when style errors are found.
        return error_count > 0
def get_test_baselines(test_file, test_config):
    # FIXME: This seems like a hack. This only seems to be used to access the Port.expected_baselines logic.
    class AllPlatformsPort(Port):
        def __init__(self, host):
            super(AllPlatformsPort, self).__init__(host, 'mac')
            self._platforms_by_directory = dict([(self._webkit_baseline_path(p), p) for p in test_config.platforms])

        def baseline_search_path(self):
            return self._platforms_by_directory.keys()

        def platform_from_directory(self, directory):
            return self._platforms_by_directory[directory]

    test_path = test_config.filesystem.join(test_config.layout_tests_directory, test_file)

    # FIXME: This should get the Host from the test_config to be mockable!
    host = Host()
    host.initialize_scm()
    host.filesystem = test_config.filesystem
    all_platforms_port = AllPlatformsPort(host)

    all_test_baselines = {}
    for baseline_extension in ('.txt', '.checksum', '.png'):
        test_baselines = test_config.test_port.expected_baselines(test_file, baseline_extension)
        baselines = all_platforms_port.expected_baselines(test_file, baseline_extension, all_baselines=True)
        for platform_directory, expected_filename in baselines:
            if not platform_directory:
                continue
            if platform_directory == test_config.layout_tests_directory:
                platform = 'base'
            else:
                platform = all_platforms_port.platform_from_directory(platform_directory)
            platform_baselines = all_test_baselines.setdefault(platform, {})
            was_used_for_test = (platform_directory, expected_filename) in test_baselines
            platform_baselines[baseline_extension] = was_used_for_test

    return all_test_baselines
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)

        # The GTK+ and EFL ports only support WebKit2, so they always use WKTR.
        if self._port.name().startswith("gtk") or self._port.name().startswith("efl"):
            self._options.webkit_test_runner = True

        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                help="Do no generate results JSON and results page."),
            optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
                help="Path to generate a JSON file at; may contain previous results if it already exists."),
            optparse.make_option("--reset-results", action="store_true",
                help="Clears the content in the generated JSON file before adding the results."),
            optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
                help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option("--description",
                help="Add a description to the output JSON file if one is generated"),
            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
                help="Don't launch a browser with results after the tests are done"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--dump-render-tree", "-1", action="store_false", default=True, dest="webkit_test_runner",
                help="Use DumpRenderTree rather than WebKitTestRunner."),
            optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
                help="Run all tests, including the ones in the Skipped list."),
            optparse.make_option("--profile", action="store_true",
                help="Output per-test profile information."),
            optparse.make_option("--profiler", action="store",
                help="Output per-test profile information, using the specified profiler."),
            optparse.make_option("--additional-drt-flag", action="append",
                default=[], help="Additional command line flag to pass to DumpRenderTree. "
                     "Specify multiple times to add multiple flags."),
            optparse.make_option("--driver-name", type="string",
                help="Alternative DumpRenderTree binary to use"),
            optparse.make_option("--repeat", default=1, type="int",
                help="Specify number of times to run test set (default: 1)."),
            optparse.make_option("--test-runner-count", default=-1, type="int",
                help="Specify number of times to invoke test runner for each performance test."),
            optparse.make_option("--wrapper",
                help="wrapper command to insert before invocations of "
                 "DumpRenderTree or WebKitTestRunner; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
            ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        test_extensions = ['.html', '.svg']

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            if filesystem.exists(filesystem.join(self._base_path, arg)):
                paths.append(arg)
            else:
                relpath = filesystem.relpath(arg, self._base_path)
                if filesystem.exists(filesystem.join(self._base_path, relpath)):
                    paths.append(filesystem.normpath(relpath))
                else:
                    _log.warn('Path was not found: ' + arg)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []

        test_runner_count = DEFAULT_TEST_RUNNER_COUNT
        if self._options.test_runner_count > 0:
            test_runner_count = self._options.test_runner_count
        elif self._options.profile:
            test_runner_count = 1

        for path in test_files:
            relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
            if self._options.use_skipped_list and self._port.skips_perf_test(relative_path) and filesystem.normpath(relative_path) not in paths:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path, test_runner_count=test_runner_count)
            tests.append(test)

        return tests

    def run(self):
        if "Debug" == self._port.get_option("configuration"):
            _log.warning("""****************************************************
* WARNING: run-perf-tests is running in DEBUG mode *
****************************************************""")

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._port.check_sys_deps(needs_http=False):
            _log.error("Failed to check system dependencies.")
            self._port.stop_helper()
            return self.EXIT_CODE_BAD_PREPARATION

        run_count = 0
        repeat = self._options.repeat
        while (run_count < repeat):
            run_count += 1

            tests = self._collect_tests()
            runs = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
            _log.info("Running %d tests%s" % (len(tests), runs))

            for test in tests:
                if not test.prepare(self._options.time_out_ms):
                    return self.EXIT_CODE_BAD_PREPARATION

            unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))

            if self._options.generate_results and not self._options.profile:
                exit_code = self._generate_results()
                if exit_code:
                    return exit_code

        if self._options.generate_results and not self._options.profile:
            test_results_server = self._options.test_results_server
            if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
                return self.EXIT_CODE_FAILED_UPLOADING

            if self._options.show_results:
                self._port.show_results_html_file(self._results_page_path())

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _results_page_path(self):
        return self._host.filesystem.splitext(self._output_json_path())[0] + '.html'

    def _generate_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        filesystem = self._host.filesystem
        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
        template = filesystem.read_text_file(template_path)

        absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
        results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
        results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)

        filesystem.write_text_file(self._results_page_path(), results_page)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        contents = {'tests': {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for metric in self._results:
            tests = contents['tests']
            path = metric.path()
            for i in range(0, len(path)):
                is_last_token = i + 1 == len(path)
                url = view_source_url('PerformanceTests/' + '/'.join(path[0:i + 1]))
                test_name = path[i]

                # FIXME: This is a temporary workaround for the fact that the perf dashboard doesn't support renaming tests.
                if test_name == 'Speedometer':
                    test_name = 'DoYouEvenBench'

                tests.setdefault(test_name, {'url': url})
                current_test = tests[test_name]
                if is_last_token:
                    current_test['url'] = view_source_url('PerformanceTests/' + metric.test_file_name())
                    current_test.setdefault('metrics', {})
                    assert metric.name() not in current_test['metrics']
                    test_results = {'current': metric.grouped_iteration_values()}
                    if metric.aggregator():
                        test_results['aggregators'] = [metric.aggregator()]
                    current_test['metrics'][metric.name()] = test_results
                else:
                    current_test.setdefault('tests', {})
                    tests = current_test['tests']

        return contents

    @staticmethod
    def _datetime_in_ES5_compatible_iso_format(datetime):
        return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')

    def _merge_slave_config_json(self, slave_config_json_path, contents):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            for key in slave_config:
                contents['builder' + key.capitalize()] = slave_config[key]
            return contents
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None
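
As a small worked example of the merge in _merge_slave_config_json above (the key names are hypothetical): each slave-config key is prefixed with 'builder' and capitalized before being copied into the results dictionary.

# Sketch of the key-renaming behaviour shown above.
slave_config = {'gpu': 'GTX660', 'memory': '16GB'}
contents = {'tests': {}}
for key in slave_config:
    contents['builder' + key.capitalize()] = slave_config[key]
assert contents['builderGpu'] == 'GTX660'
assert contents['builderMemory'] == '16GB'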
Example #8
class PerfTestsRunner(object):
    _default_branch = "webkit-trunk"
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = "PerformanceTestsResults.json"

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)

        perf_option_list = [
            optparse.make_option(
                "--debug",
                action="store_const",
                const="Debug",
                dest="configuration",
                help="Set the configuration to Debug",
            ),
            optparse.make_option(
                "--release",
                action="store_const",
                const="Release",
                dest="configuration",
                help="Set the configuration to Release",
            ),
            optparse.make_option("--platform", help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option(
                "--chromium",
                action="store_const",
                const="chromium",
                dest="platform",
                help="Alias for --platform=chromium",
            ),
            optparse.make_option(
                "--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2."),
            ),
            optparse.make_option("--build-number", help=("The build number of the builder running this script.")),
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default).",
            ),
            optparse.make_option(
                "--no-build",
                dest="build",
                action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date.",
            ),
            optparse.make_option(
                "--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)",
            ),
            optparse.make_option("--time-out-ms", default=600 * 1000, help="Set the timeout for each test"),
            optparse.make_option(
                "--pause-before-testing",
                dest="pause_before_testing",
                action="store_true",
                default=False,
                help="Pause before running the tests to let user attach a performance monitor.",
            ),
            optparse.make_option(
                "--no-results",
                action="store_false",
                dest="generate_results",
                default=True,
                help="Do no generate results JSON and results page.",
            ),
            optparse.make_option(
                "--output-json-path",
                action="callback",
                callback=_expand_path,
                type="str",
                help="Path to generate a JSON file at; may contain previous results if it already exists.",
            ),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                help="Clears the content in the generated JSON file before adding the results.",
            ),
            optparse.make_option(
                "--slave-config-json-path",
                action="callback",
                callback=_expand_path,
                type="str",
                help="Only used on bots. Path to a slave configuration file.",
            ),
            optparse.make_option("--description", help="Add a description to the output JSON file if one is generated"),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help="Don't launch a browser with results after the tests are done",
            ),
            optparse.make_option(
                "--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present.",
            ),
            optparse.make_option(
                "--webkit-test-runner",
                "-2",
                action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree.",
            ),
            optparse.make_option(
                "--replay", dest="replay", action="store_true", default=False, help="Run replay tests."
            ),
            optparse.make_option(
                "--force",
                dest="skipped",
                action="store_true",
                default=False,
                help="Run all tests, including the ones in the Skipped list.",
            ),
        ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = [".html", ".svg"]
        if self._options.replay:
            test_extensions.append(".replay")

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set([".svn", "resources"])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace("\\", "/")
            if self._port.skips_perf_test(relative_path) and not self._options.skipped:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def run(self):
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self.EXIT_CODE_BAD_PREPARATION

        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
        if self._options.generate_results:
            exit_code = self._generate_and_show_results()
            if exit_code:
                return exit_code

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _generate_and_show_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(
            self._timestamp, options.description, options.platform, options.builder_name, options.build_number
        )

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        results_page_path = self._host.filesystem.splitext(output_json_path)[0] + ".html"
        self._generate_output_files(output_json_path, results_page_path, output)

        if options.test_results_server:
            if not self._upload_json(options.test_results_server, output_json_path):
                return self.EXIT_CODE_FAILED_UPLOADING

        if options.show_results:
            self._port.show_results_html_file(results_page_path)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        contents = {"results": self._results}
        if description:
            contents["description"] = description
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            contents[name + "-revision"] = scm.svn_revision(path)

        # FIXME: Add --branch or auto-detect the branch we're in
        for key, value in {
            "timestamp": int(timestamp),
            "branch": self._default_branch,
            "platform": platform,
            "builder-name": builder_name,
            "build-number": int(build_number) if build_number else None,
        }.items():
            if value:
                contents[key] = value

        return contents

    def _merge_slave_config_json(self, slave_config_json_path, output):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            return dict(slave_config.items() + output.items())
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None
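
A note on the older merge in this version: under Python 2, dict.items() returns lists, so the concatenation works and entries from output win on key collisions because they come later. A minimal sketch with hypothetical keys:

# Python 2 semantics: items() returns lists, later entries override earlier ones.
slave_config = {'gpu': 'GTX660', 'platform': 'from-slave-config'}
output = {'platform': 'chromium-mac', 'timestamp': 1234567890}
merged = dict(slave_config.items() + output.items())
assert merged['platform'] == 'chromium-mac'  # the run's own value wins
assert merged['gpu'] == 'GTX660'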
Example #9
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform,
                                                     self._options)

        # Timeouts are controlled by the Python Driver, so DRT/WTR runs with no-timeout.
        self._options.additional_drt_flag.append('--no-timeout')

        # The GTK+ and EFL ports only support WebKit2, so they always use WKTR.
        if self._port.name().startswith("gtk") or self._port.name().startswith(
                "efl"):
            self._options.webkit_test_runner = True

        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)

        perf_option_list = [
            optparse.make_option('--debug',
                                 action='store_const',
                                 const='Debug',
                                 dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release',
                                 action='store_const',
                                 const='Release',
                                 dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option(
                "--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option(
                "--builder-name",
                help=
                ("The name of the builder shown on the waterfall running this script e.g. google-mac-2."
                 )),
            optparse.make_option(
                "--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help=
                "Check to ensure the DumpRenderTree build is up-to-date (default)."
            ),
            optparse.make_option(
                "--no-build",
                dest="build",
                action="store_false",
                help=
                "Don't check to see if the DumpRenderTree build is up-to-date."
            ),
            optparse.make_option(
                "--build-directory",
                help=
                "Path to the directory under which build files are kept (should not include configuration)"
            ),
            optparse.make_option("--time-out-ms",
                                 default=600 * 1000,
                                 help="Set the timeout for each test"),
            optparse.make_option("--no-timeout",
                                 action="store_true",
                                 default=False,
                                 help="Disable test timeouts"),
            optparse.make_option(
                "--no-results",
                action="store_false",
                dest="generate_results",
                default=True,
                help="Do no generate results JSON and results page."),
            optparse.make_option(
                "--output-json-path",
                action='callback',
                callback=_expand_path,
                type="str",
                help=
                "Path to generate a JSON file at; may contain previous results if it already exists."
            ),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                help=
                "Clears the content in the generated JSON file before adding the results."
            ),
            optparse.make_option(
                "--slave-config-json-path",
                action='callback',
                callback=_expand_path,
                type="str",
                help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option(
                "--description",
                help=
                "Add a description to the output JSON file if one is generated"
            ),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help=
                "Don't launch a browser with results after the tests are done"
            ),
            optparse.make_option(
                "--test-results-server",
                help=
                "Upload the generated JSON file to the specified server when --output-json-path is present."
            ),
            optparse.make_option(
                "--dump-render-tree",
                "-1",
                action="store_false",
                default=True,
                dest="webkit_test_runner",
                help="Use DumpRenderTree rather than WebKitTestRunner."),
            optparse.make_option(
                "--force",
                dest="use_skipped_list",
                action="store_false",
                default=True,
                help="Run all tests, including the ones in the Skipped list."),
            optparse.make_option("--profile",
                                 action="store_true",
                                 help="Output per-test profile information."),
            optparse.make_option(
                "--profiler",
                action="store",
                help=
                "Output per-test profile information, using the specified profiler."
            ),
            optparse.make_option(
                "--additional-drt-flag",
                action="append",
                default=[],
                help="Additional command line flag to pass to DumpRenderTree "
                "Specify multiple times to add multiple flags."),
            optparse.make_option(
                "--driver-name",
                type="string",
                help="Alternative DumpRenderTree binary to use"),
            optparse.make_option(
                "--repeat",
                default=1,
                type="int",
                help="Specify number of times to run test set (default: 1)."),
            optparse.make_option(
                "--test-runner-count",
                default=-1,
                type="int",
                help=
                "Specify number of times to invoke test runner for each performance test."
            ),
            optparse.make_option(
                "--wrapper",
                help="wrapper command to insert before invocations of "
                "DumpRenderTree or WebKitTestRunner; option is split on whitespace before "
                "running. (Example: --wrapper='valgrind --smc-check=all')"),
            optparse.make_option(
                '--display-server',
                choices=['xvfb', 'xorg', 'weston', 'wayland'],
                default='xvfb',
                help=
                '"xvfb": Use a virtualized X11 server. "xorg": Use the current X11 session. '
                '"weston": Use a virtualized Weston server. "wayland": Use the current wayland session.'
            ),
        ]
        return optparse.OptionParser(
            option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        test_extensions = ['.html', '.svg']

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            if filesystem.exists(filesystem.join(self._base_path, arg)):
                paths.append(arg)
            else:
                relpath = filesystem.relpath(arg, self._base_path)
                if filesystem.exists(filesystem.join(self._base_path,
                                                     relpath)):
                    paths.append(filesystem.normpath(relpath))
                else:
                    _log.warn('Path was not found: ' + arg)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths,
                                     skipped_directories, _is_test_file)
        tests = []

        test_runner_count = DEFAULT_TEST_RUNNER_COUNT
        if self._options.test_runner_count > 0:
            test_runner_count = self._options.test_runner_count
        elif self._options.profile:
            test_runner_count = 1

        for path in test_files:
            relative_path = filesystem.relpath(path, self._base_path).replace(
                '\\', '/')
            if self._options.use_skipped_list and self._port.skips_perf_test(
                    relative_path) and filesystem.normpath(
                        relative_path) not in paths:
                continue
            test = PerfTestFactory.create_perf_test(
                self._port,
                relative_path,
                path,
                test_runner_count=test_runner_count)
            tests.append(test)

        return tests

    def run(self):
        if "Debug" == self._port.get_option("configuration"):
            _log.warning(
                """****************************************************
* WARNING: run-perf-tests is running in DEBUG mode *
****************************************************""")

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" %
                       self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._port.check_sys_deps(needs_http=False):
            _log.error("Failed to check system dependencies.")
            self._port.stop_helper()
            return self.EXIT_CODE_BAD_PREPARATION

        run_count = 0
        repeat = self._options.repeat
        while (run_count < repeat):
            run_count += 1

            tests = self._collect_tests()
            runs = ' (Run %d of %d)' % (run_count,
                                        repeat) if repeat > 1 else ''
            _log.info("Running %d tests%s" % (len(tests), runs))

            for test in tests:
                if not test.prepare(self._options.time_out_ms):
                    return self.EXIT_CODE_BAD_PREPARATION

            unexpected = self._run_tests_set(
                sorted(list(tests), key=lambda test: test.test_name()))

            if self._options.generate_results and not self._options.profile:
                exit_code = self._generate_results()
                if exit_code:
                    return exit_code

        if self._options.generate_results and not self._options.profile:
            test_results_server = self._options.test_results_server
            if test_results_server and not self._upload_json(
                    test_results_server, self._output_json_path()):
                return self.EXIT_CODE_FAILED_UPLOADING

            if self._options.show_results:
                self._port.show_results_html_file(self._results_page_path())

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(),
                                          self._DEFAULT_JSON_FILENAME)

    def _results_page_path(self):
        return self._host.filesystem.splitext(
            self._output_json_path())[0] + '.html'

    def _generate_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp,
                                             options.description,
                                             options.platform,
                                             options.builder_name,
                                             options.build_number)

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(
                options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        filesystem = self._host.filesystem
        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        template_path = filesystem.join(self._port.perf_tests_dir(),
                                        'resources/results-template.html')
        template = filesystem.read_text_file(template_path)

        absolute_path_to_trunk = filesystem.dirname(
            self._port.perf_tests_dir())
        results_page = template.replace('%AbsolutePathToWebKitTrunk%',
                                        absolute_path_to_trunk)
        results_page = results_page.replace('%PeformanceTestsResultsJSON%',
                                            json_output)

        filesystem.write_text_file(self._results_page_path(), results_page)

    def _generate_results_dict(self, timestamp, description, platform,
                               builder_name, build_number):
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive
                              ).detect_scm_system(path) or self._host.scm()
            revision = scm.native_revision(path)
            revisions[name] = {
                'revision': revision,
                'timestamp': scm.timestamp_of_native_revision(path, revision)
            }

        meta_info = {
            'description':
            description,
            'buildTime':
            self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform':
            platform,
            'revisions':
            revisions,
            'builderName':
            builder_name,
            'buildNumber':
            int(build_number) if build_number else None
        }

        contents = {'tests': {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for metric in self._results:
            tests = contents['tests']
            path = metric.path()
            for i in range(0, len(path)):
                is_last_token = i + 1 == len(path)
                url = view_source_url('PerformanceTests/' +
                                      '/'.join(path[0:i + 1]))
                test_name = path[i]

                # FIXME: This is a temporary workaround for the fact that the perf dashboard doesn't support renaming tests.
                if test_name == 'Speedometer':
                    test_name = 'DoYouEvenBench'

                tests.setdefault(test_name, {'url': url})
                current_test = tests[test_name]
                if is_last_token:
                    current_test['url'] = view_source_url(
                        'PerformanceTests/' + metric.test_file_name())
                    current_test.setdefault('metrics', {})
                    assert metric.name() not in current_test['metrics']
                    test_results = {
                        'current': metric.grouped_iteration_values()
                    }
                    if metric.aggregator():
                        test_results['aggregators'] = [metric.aggregator()]
                    current_test['metrics'][metric.name()] = test_results
                else:
                    current_test.setdefault('tests', {})
                    tests = current_test['tests']

        return contents

    @staticmethod
    def _datetime_in_ES5_compatible_iso_format(datetime):
        return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')

    def _merge_slave_config_json(self, slave_config_json_path, contents):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" %
                       slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(
                slave_config_json_path)
            slave_config = json.load(slave_config_json)
            for key in slave_config:
                contents['builder' + key.capitalize()] = slave_config[key]
            return contents
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" %
                       (slave_config_json_path, error))
        return None
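
For reference, a quick check of the _datetime_in_ES5_compatible_iso_format helper used above; the input value is arbitrary.

import datetime

# strftime('%Y-%m-%dT%H:%M:%S.%f') keeps microsecond precision and omits any timezone.
example = datetime.datetime(2014, 7, 1, 12, 30, 45, 123456)
assert example.strftime('%Y-%m-%dT%H:%M:%S.%f') == '2014-07-01T12:30:45.123456'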
Example #10
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()
        self._needs_http = None
        self._has_http_lock = False

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--chromium-android",
                action="store_const", const='chromium-android', dest='platform', help='Alias for --platform=chromium-android'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let user attach a performance monitor."),
            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                help="Do no generate results JSON and results page."),
            optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
                help="Path to generate a JSON file at; may contain previous results if it already exists."),
            optparse.make_option("--reset-results", action="store_true",
                help="Clears the content in the generated JSON file before adding the results."),
            optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
                help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option("--description",
                help="Add a description to the output JSON file if one is generated"),
            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
                help="Don't launch a browser with results after the tests are done"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
                help="Run replay tests."),
            optparse.make_option("--force", dest="skipped", action="store_true", default=False,
                help="Run all tests, including the ones in the Skipped list."),
            optparse.make_option("--profile", action="store_true",
                help="Output per-test profile information."),
            ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
            if self._port.skips_perf_test(relative_path) and not self._options.skipped:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def _start_servers(self):
        if self._needs_http:
            self._port.acquire_http_lock()
            self._port.start_http_server(number_of_servers=2)
            self._has_http_lock = True

    def _stop_servers(self):
        if self._has_http_lock:
            self._port.stop_http_server()
            self._port.release_http_lock()

    def run(self):
        self._needs_http = self._port.requires_http_server()

        if not self._port.check_build(needs_http=self._needs_http):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self.EXIT_CODE_BAD_PREPARATION

        try:
            self._start_servers()
            unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)

        finally:
            self._stop_servers()

        if self._options.generate_results and not self._options.profile:
            exit_code = self._generate_and_show_results()
            if exit_code:
                return exit_code

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _generate_and_show_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'
        self._generate_output_files(output_json_path, results_page_path, output)

        if options.test_results_server:
            if not self._upload_json(options.test_results_server, output_json_path):
                return self.EXIT_CODE_FAILED_UPLOADING

        if options.show_results:
            self._port.show_results_html_file(results_page_path)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        contents = {'results': self._results}
        if description:
            contents['description'] = description
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            contents[name + '-revision'] = scm.svn_revision(path)

        # FIXME: Add --branch or auto-detect the branch we're in
        for key, value in {'timestamp': int(timestamp), 'branch': self._default_branch, 'platform': platform,
            'builder-name': builder_name, 'build-number': int(build_number) if build_number else None}.items():
            if value:
                contents[key] = value

        return contents

    def _merge_slave_config_json(self, slave_config_json_path, output):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            return dict(slave_config.items() + output.items())
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None
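
A note on the path-valued options above (--output-json-path, --slave-config-json-path): they use an optparse callback to expand '~' and environment variables before the value is stored. A minimal standalone sketch of that pattern, with an illustrative option name rather than one taken from the listing:

import optparse
import os

def expand_path_callback(option, opt_str, value, parser):
    # Store the user-supplied path with '~' and $VARIABLES expanded.
    setattr(parser.values, option.dest, os.path.expandvars(os.path.expanduser(value)))

parser = optparse.OptionParser()
parser.add_option('--json-path', action='callback', callback=expand_path_callback,
                  type='str', dest='json_path')
options, _ = parser.parse_args(['--json-path', '~/results/PerformanceTestsResults.json'])
print(options.json_path)  # expanded path under the user's home directory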
Exemple #11
0
class W3CTestConverter(object):
    def __init__(self):
        self._host = Host()
        self._filesystem = self._host.filesystem
        self._host.initialize_scm()
        self._webkit_root = self._host.scm().checkout_root

        # These settings might vary between WebKit and Blink
        self._css_property_file = self.path_from_webkit_root(
            'Source', 'core', 'css', 'CSSPropertyNames.in')
        self._css_property_split_string = 'alias_for='

        self.prefixed_properties = self.read_webkit_prefixed_css_property_list(
        )

    def path_from_webkit_root(self, *comps):
        return self._filesystem.abspath(
            self._filesystem.join(self._webkit_root, *comps))

    def read_webkit_prefixed_css_property_list(self):
        prefixed_properties = []

        contents = self._filesystem.read_text_file(self._css_property_file)
        for line in contents.splitlines():
            # Find lines starting with the -webkit- prefix.
            match = re.match('-webkit-[\w|-]*', line)
            if match:
                # Ignore lines where both the prefixed and non-prefixed property
                # are supported - denoted by -webkit-some-property = some-property.
                fields = line.split(self._css_property_split_string)
                if len(fields) == 2 and fields[1].strip() in fields[0].strip():
                    continue
                prefixed_properties.append(match.group(0))

        return prefixed_properties

    def convert_for_webkit(self, new_path, filename):
        """ Converts a file's |contents| so it will function correctly in its |new_path| in Webkit.

        Returns the list of modified properties and the modified text if the file was modifed, None otherwise."""
        contents = self._filesystem.read_binary_file(filename)
        if filename.endswith('.css'):
            return self.convert_css(contents, filename)
        return self.convert_html(new_path, contents, filename)

    def convert_css(self, contents, filename):
        return self.add_webkit_prefix_to_unprefixed_properties(
            contents, filename)

    def convert_html(self, new_path, contents, filename):
        doc = BeautifulSoup(contents)
        did_modify_paths = self.convert_testharness_paths(
            doc, new_path, filename)
        converted_properties_and_content = self.convert_prefixed_properties(
            doc, filename)
        return converted_properties_and_content if (
            did_modify_paths or converted_properties_and_content[0]) else None

    def convert_testharness_paths(self, doc, new_path, filename):
        """ Update links to testharness.js in the BeautifulSoup |doc| to point to the copy in |new_path|.

        Returns whether the document was modified."""

        # Look for the W3C-style path to any testharness files - scripts (.js) or links (.css)
        pattern = re.compile('/resources/testharness')
        script_tags = doc.findAll(src=pattern)
        link_tags = doc.findAll(href=pattern)
        testharness_tags = script_tags + link_tags

        if not testharness_tags:
            return False

        resources_path = self.path_from_webkit_root('LayoutTests', 'resources')
        resources_relpath = self._filesystem.relpath(resources_path, new_path)

        for tag in testharness_tags:
            # FIXME: We need to handle img, audio, video tags also.
            attr = 'src'
            if tag.name != 'script':
                attr = 'href'

            if attr not in tag.attrMap:
                # FIXME: Figure out what to do w/ invalid tags. For now, we return False
                # and leave the document unmodified, which means that it'll probably fail to run.
                _log.error("Missing an attr in %s" % filename)
                return False

            old_path = tag[attr]
            new_tag = Tag(doc, tag.name, tag.attrs)
            new_tag[attr] = re.sub(pattern, resources_relpath + '/testharness',
                                   old_path)

            self.replace_tag(tag, new_tag)

        return True

    def convert_prefixed_properties(self, doc, filename):
        """ Searches a BeautifulSoup |doc| for any CSS properties requiring the -webkit- prefix and converts them.

        Returns the list of converted properties and the modified document as a string """

        converted_properties = []

        # Look for inline and document styles.
        inline_styles = doc.findAll(style=re.compile('.*'))
        style_tags = doc.findAll('style')
        all_styles = inline_styles + style_tags

        for tag in all_styles:

            # Get the text whether in a style tag or style attribute.
            style_text = ''
            if tag.name == 'style':
                if not tag.contents:
                    continue
                style_text = tag.contents[0]
            else:
                style_text = tag['style']

            updated_style_text = self.add_webkit_prefix_to_unprefixed_properties(
                style_text, filename)

            # Rewrite tag only if changes were made.
            if updated_style_text[0]:
                converted_properties.extend(updated_style_text[0])

                new_tag = Tag(doc, tag.name, tag.attrs)
                new_tag.insert(0, updated_style_text[1])

                self.replace_tag(tag, new_tag)

        return (converted_properties, doc.prettify())

    def add_webkit_prefix_to_unprefixed_properties(self, text, filename):
        """ Searches |text| for instances of properties requiring the -webkit- prefix and adds the prefix to them.

        Returns the list of converted properties and the modified text."""

        converted_properties = []

        for prefixed_property in self.prefixed_properties:
            # FIXME: add in both the prefixed and unprefixed versions, rather than just replacing them?
            # That might allow the imported test to work in other browsers more easily.

            unprefixed_property = prefixed_property.replace('-webkit-', '')

            # Look for the various ways it might be in the CSS
            # Match the property preceded by either whitespace or left curly brace
            # or at the beginning of the string (for inline style attribute)
            pattern = '([\s{]|^)' + unprefixed_property + '(\s+:|:)'
            if re.search(pattern, text):
                _log.info('converting %s -> %s' %
                          (unprefixed_property, prefixed_property))
                converted_properties.append(prefixed_property)
                text = re.sub(pattern, prefixed_property + ':', text)

        # FIXME: Handle the JS versions of these properties and GetComputedStyle, too.
        return (converted_properties, text)

    def replace_tag(self, old_tag, new_tag):
        index = old_tag.parent.contents.index(old_tag)
        old_tag.parent.insert(index, new_tag)
        old_tag.extract()
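
A brief usage sketch for the converter above; the destination directory under LayoutTests and the source file path are illustrative assumptions, not values from the listing:

converter = W3CTestConverter()
# Hypothetical destination inside LayoutTests and a hypothetical imported test file.
new_path = converter.path_from_webkit_root('LayoutTests', 'w3c', 'canvas')
result = converter.convert_for_webkit(new_path, '/tmp/imported/canvas-test.html')
if result is None:
    print('No conversion needed')
else:
    converted_properties, converted_text = result
    print('Converted properties: %s' % ', '.join(converted_properties))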
Exemple #12
0
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--chromium-android",
                action="store_const", const='chromium-android', dest='platform', help='Alias for --platform=chromium-android'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                help="Do no generate results JSON and results page."),
            optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
                help="Path to generate a JSON file at; may contain previous results if it already exists."),
            optparse.make_option("--reset-results", action="store_true",
                help="Clears the content in the generated JSON file before adding the results."),
            optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
                help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option("--description",
                help="Add a description to the output JSON file if one is generated"),
            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
                help="Don't launch a browser with results after the tests are done"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
                help="Run replay tests."),
            optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
                help="Run all tests, including the ones in the Skipped list."),
            optparse.make_option("--profile", action="store_true",
                help="Output per-test profile information."),
            optparse.make_option("--profiler", action="store",
                help="Output per-test profile information, using the specified profiler."),
            optparse.make_option("--additional-drt-flag", action="append",
                default=[], help="Additional command line flag to pass to DumpRenderTree "
                     "Specify multiple times to add multiple flags."),
            ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            if filesystem.exists(filesystem.join(self._base_path, arg)):
                paths.append(arg)
            else:
                relpath = filesystem.relpath(arg, self._base_path)
                if filesystem.exists(filesystem.join(self._base_path, relpath)):
                    paths.append(filesystem.normpath(relpath))
                else:
                    _log.warn('Path was not found: ' + arg)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
            if self._options.use_skipped_list and self._port.skips_perf_test(relative_path) and filesystem.normpath(relative_path) not in paths:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def _start_http_servers(self):
        self._port.acquire_http_lock()
        self._port.start_http_server(number_of_servers=2)

    def _stop_http_servers(self):
        self._port.stop_http_server()
        self._port.release_http_lock()

    def run(self):
        needs_http = self._port.requires_http_server()

        if not self._port.check_build(needs_http=needs_http):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self.EXIT_CODE_BAD_PREPARATION

        try:
            if needs_http:
                self._start_http_servers()
            unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)

        finally:
            if needs_http:
                self._stop_http_servers()

        if self._options.generate_results and not self._options.profile:
            exit_code = self._generate_and_show_results()
            if exit_code:
                return exit_code

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _generate_and_show_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'
        self._generate_output_files(output_json_path, results_page_path, output)

        if options.test_results_server:
            if options.test_results_server == 'webkit-perf.appspot.com':
                options.test_results_server = 'perf.webkit.org'

            if not self._upload_json(options.test_results_server, output_json_path):
                return self.EXIT_CODE_FAILED_UPLOADING

        if options.show_results:
            self._port.show_results_html_file(results_page_path)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        contents = {'tests': {}}
        if description:
            contents['description'] = description

        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': str(revision), 'timestamp': scm.timestamp_of_latest_commit(path, revision)}

        meta_info = {
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        for key, value in meta_info.items():
            if value:
                contents[key] = value

        # FIXME: Make this function shorter once we've transitioned to use perf.webkit.org.
        for metric_full_name, result in self._results.iteritems():
            if not isinstance(result, dict):  # We can't report results without individual measurements.
                continue

            assert metric_full_name.count(':') <= 1
            test_full_name, _, metric = metric_full_name.partition(':')
            if not metric:
                metric = {'fps': 'FrameRate', 'runs/s': 'Runs', 'ms': 'Time'}[result['unit']]

            tests = contents['tests']
            path = test_full_name.split('/')
            for i in range(0, len(path)):
                # FIXME: We shouldn't assume HTML extension.
                is_last_token = i + 1 == len(path)
                url = 'http://trac.webkit.org/browser/trunk/PerformanceTests/' + '/'.join(path[0:i + 1])
                if is_last_token:
                    url += '.html'

                tests.setdefault(path[i], {'url': url})
                current_test = tests[path[i]]
                if is_last_token:
                    current_test.setdefault('metrics', {})
                    assert metric not in current_test['metrics']
                    current_test['metrics'][metric] = {'current': result['values']}
                else:
                    current_test.setdefault('tests', {})
                    tests = current_test['tests']

        return contents

    @staticmethod
    def _datetime_in_ES5_compatible_iso_format(datetime):
        return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')

    def _merge_slave_config_json(self, slave_config_json_path, contents):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            for key in slave_config:
                contents['builder' + key.capitalize()] = slave_config[key]
            return contents
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None
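
_generate_results_dict above nests results by path component, so a metric such as Parser/html5-full-render:Time ends up under tests -> Parser -> tests -> html5-full-render -> metrics -> Time, alongside whichever meta keys were set. A hand-built illustration of that shape (all values are made up):

example_output = {
    'buildTime': '2013-02-08T15:30:00.000000',
    'platform': 'chromium-mac',
    'builderName': 'google-mac-2',
    'revisions': {'WebKit': {'revision': '140000', 'timestamp': '2013-02-08T14:00:00Z'}},
    'tests': {
        'Parser': {
            'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser',
            'tests': {
                'html5-full-render': {
                    'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/html5-full-render.html',
                    'metrics': {'Time': {'current': [1837.2, 1828.9, 1842.3]}},
                },
            },
        },
    },
}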
Exemple #13
0
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option('-t', '--target', dest='configuration',
                                 help='Specify the target build subdirectory under src/out/'),
            optparse.make_option("--platform",
                                 help="Specify port/platform being tested (e.g. mac)"),
            optparse.make_option("--chromium",
                                 action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--android",
                                 action="store_const", const='android', dest='platform', help='Alias for --platform=android'),
            optparse.make_option("--builder-name",
                                 help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                                 help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                                 help="Check to ensure the DumpRenderTree build is up to date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                                 help="Don't check to see if the DumpRenderTree build is up to date."),
            optparse.make_option("--build-directory",
                                 help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                                 help="Set the timeout for each test"),
            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                                 help="Do no generate results JSON and results page."),
            optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
                                 help="Path to generate a JSON file at; may contain previous results if it already exists."),
            optparse.make_option("--reset-results", action="store_true",
                                 help="Clears the content in the generated JSON file before adding the results."),
            optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
                                 help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option("--description",
                                 help="Add a description to the output JSON file if one is generated"),
            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
                                 help="Don't launch a browser with results after the tests are done"),
            optparse.make_option("--test-results-server",
                                 help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
                                 help="Run all tests, including the ones in the Skipped list."),
            optparse.make_option("--profile", action="store_true",
                                 help="Output per-test profile information."),
            optparse.make_option("--profiler", action="store",
                                 help="Output per-test profile information, using the specified profiler."),
            optparse.make_option("--additional-driver-flag", action="append",
                                 default=[], help="Additional command line flag to pass to DumpRenderTree "
                                 "Specify multiple times to add multiple flags."),
            optparse.make_option("--driver-name", type="string",
                                 help="Alternative DumpRenderTree binary to use"),
            optparse.make_option("--content-shell", action="store_true",
                                 help="Use Content Shell instead of DumpRenderTree"),
            optparse.make_option("--repeat", default=1, type="int",
                                 help="Specify number of times to run test set (default: 1)."),
            optparse.make_option("--test-runner-count", default=DEFAULT_TEST_RUNNER_COUNT, type="int",
                                 help="Specify number of times to invoke test runner for each performance test."),
        ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        test_extensions = ['.html', '.svg']

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            if filesystem.exists(filesystem.join(self._base_path, arg)):
                paths.append(arg)
            else:
                relpath = filesystem.relpath(arg, self._base_path)
                if filesystem.exists(filesystem.join(self._base_path, relpath)):
                    paths.append(filesystem.normpath(relpath))
                else:
                    _log.warning('Path was not found: ' + arg)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
            if self._options.use_skipped_list and self._port.skips_perf_test(
                    relative_path) and filesystem.normpath(relative_path) not in paths:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path,
                                                    test_runner_count=self._options.test_runner_count)
            tests.append(test)

        return tests

    def _start_http_servers(self):
        self._port.acquire_http_lock()
        self._port.start_http_server(number_of_servers=2)

    def _stop_http_servers(self):
        self._port.stop_http_server()
        self._port.release_http_lock()

    def run(self):
        needs_http = self._port.requires_http_server()

        class FakePrinter(object):

            def write_update(self, msg):
                print msg

            def write_throttled_update(self, msg):
                pass

        if not self._port.check_build(needs_http=needs_http, printer=FakePrinter()):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        run_count = 0
        repeat = self._options.repeat
        while run_count < repeat:
            run_count += 1

            tests = self._collect_tests()
            runs = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
            _log.info("Running %d tests%s", len(tests), runs)

            try:
                if needs_http:
                    self._start_http_servers()
                unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))

            finally:
                if needs_http:
                    self._stop_http_servers()

            if self._options.generate_results and not self._options.profile:
                exit_code = self._generate_results()
                if exit_code:
                    return exit_code

        if self._options.generate_results and not self._options.profile:
            test_results_server = self._options.test_results_server
            if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
                return self.EXIT_CODE_FAILED_UPLOADING

            if self._options.show_results:
                self._port.show_results_html_file(self._results_page_path())

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _results_page_path(self):
        return self._host.filesystem.splitext(self._output_json_path())[0] + '.html'

    def _generate_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp, options.description,
                                             options.platform, options.builder_name, options.build_number)

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        filesystem = self._host.filesystem
        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
        template = filesystem.read_text_file(template_path)

        absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
        results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
        results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)

        filesystem.write_text_file(self._results_page_path(), results_page)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        revisions = {}
        path = self._port.repository_path()
        scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
        revision = str(scm.commit_position(path))
        revisions['chromium'] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        contents = {'tests': {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            for metric_name, iteration_values in metrics.iteritems():
                if not isinstance(iteration_values, list):  # We can't report results without individual measurements.
                    continue

                tests = contents['tests']
                path = test.test_name_without_file_extension().split('/')
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = self.view_source_url(
                        'PerformanceTests/' + (test.test_name() if is_last_token else '/'.join(path[0:i + 1])))
                    tests.setdefault(path[i], {'url': url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        current_test.setdefault('metrics', {})
                        assert metric_name not in current_test['metrics']
                        current_test['metrics'][metric_name] = {'current': iteration_values}
                    else:
                        current_test.setdefault('tests', {})
                        tests = current_test['tests']

        return contents

    @staticmethod
    def view_source_url(path_from_blink):
        return 'https://chromium.googlesource.com/chromium/src/+/master/third_party/WebKit/%s' % path_from_blink

    @staticmethod
    def _datetime_in_ES5_compatible_iso_format(datetime):
        return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')

    def _merge_slave_config_json(self, slave_config_json_path, contents):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s", slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            for key in slave_config:
                contents['builder' + key.capitalize()] = slave_config[key]
            return contents
        except Exception as error:
            _log.error("Failed to merge slave configuration JSON file %s: %s", slave_config_json_path, error)
        return None

    def _merge_outputs_if_needed(self, output_json_path, output):
        if self._options.reset_results or not self._host.filesystem.isfile(output_json_path):
            return [output]
        try:
            existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
            return existing_outputs + [output]
        except Exception as error:
            _log.error("Failed to merge output JSON file %s: %s", output_json_path, error)
        return None

    def _upload_json(self, test_results_server, json_path, host_path="/api/report", file_uploader=FileUploader):
        url = "https://%s%s" % (test_results_server, host_path)
        uploader = file_uploader(url, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception as error:
            _log.error("Failed to upload JSON file to %s in 120s: %s", url, error)
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            try:
                parsed_response = json.loads('\n'.join(response_body))
            except:
                _log.error("Uploaded JSON to %s but got a bad response:", url)
                for line in response_body:
                    _log.error(line)
                return False
            if parsed_response.get('status') != 'OK':
                _log.error("Uploaded JSON to %s but got an error:", url)
                _log.error(json.dumps(parsed_response, indent=4))
                return False

        _log.info("JSON file uploaded to %s.", url)
        return True

    def _run_tests_set(self, tests):
        failures = 0
        self._results = []

        for i, test in enumerate(tests):
            _log.info('Running %s (%d of %d)', test.test_name(), i + 1, len(tests))
            start_time = time.time()
            metrics = test.run(self._options.time_out_ms)
            if metrics:
                self._results.append((test, metrics))
            else:
                failures += 1
                _log.error('FAILED')

            _log.info('Finished: %f s', time.time() - start_time)
            _log.info('')

        return failures
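
A minimal sketch of a driver script for the runner above, assuming PerfTestsRunner is importable from this module; the real run-perf-tests entry point may differ:

import sys

def main():
    runner = PerfTestsRunner(args=sys.argv[1:])
    # run() returns the number of unexpected results, or a negative EXIT_CODE_* value on setup failure.
    return runner.run()

if __name__ == '__main__':
    sys.exit(main())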
Exemple #14
0
class W3CTestConverter(object):

    def __init__(self):
        self._host = Host()
        self._filesystem = self._host.filesystem
        self._host.initialize_scm()
        self._webkit_root = self._host.scm().checkout_root

        # These settings might vary between WebKit and Blink
        self._css_property_file = self.path_from_webkit_root('Source', 'core', 'css', 'CSSPropertyNames.in')
        self._css_property_split_string = 'alias_for='

        self.prefixed_properties = self.read_webkit_prefixed_css_property_list()

    def path_from_webkit_root(self, *comps):
        return self._filesystem.abspath(self._filesystem.join(self._webkit_root, *comps))

    def read_webkit_prefixed_css_property_list(self):
        prefixed_properties = []

        contents = self._filesystem.read_text_file(self._css_property_file)
        for line in contents.splitlines():
            # Find lines starting with the -webkit- prefix.
            match = re.match('-webkit-[\w|-]*', line)
            if match:
                # Ignore lines where both the prefixed and non-prefixed property
                # are supported - denoted by -webkit-some-property = some-property.
                fields = line.split(self._css_property_split_string)
                if len(fields) == 2 and fields[1].strip() in fields[0].strip():
                    continue
                prefixed_properties.append(match.group(0))

        return prefixed_properties

    def convert_for_webkit(self, new_path, filename):
        """ Converts a file's |contents| so it will function correctly in its |new_path| in Webkit.

        Returns the list of modified properties and the modified text if the file was modifed, None otherwise."""
        contents = self._filesystem.read_binary_file(filename)
        if filename.endswith('.css'):
            return self.convert_css(contents, filename)
        return self.convert_html(new_path, contents, filename)

    def convert_css(self, contents, filename):
        return self.add_webkit_prefix_to_unprefixed_properties(contents, filename)

    def convert_html(self, new_path, contents, filename):
        doc = BeautifulSoup(contents)
        did_modify_paths = self.convert_testharness_paths(doc, new_path, filename)
        converted_properties_and_content = self.convert_prefixed_properties(doc, filename)
        return converted_properties_and_content if (did_modify_paths or converted_properties_and_content[0]) else None

    def convert_testharness_paths(self, doc, new_path, filename):
        """ Update links to testharness.js in the BeautifulSoup |doc| to point to the copy in |new_path|.

        Returns whether the document was modified."""

        # Look for the W3C-style path to any testharness files - scripts (.js) or links (.css)
        pattern = re.compile('/resources/testharness')
        script_tags = doc.findAll(src=pattern)
        link_tags = doc.findAll(href=pattern)
        testharness_tags = script_tags + link_tags

        if not testharness_tags:
            return False

        resources_path = self.path_from_webkit_root('LayoutTests', 'resources')
        resources_relpath = self._filesystem.relpath(resources_path, new_path)

        for tag in testharness_tags:
            # FIXME: We need to handle img, audio, video tags also.
            attr = 'src'
            if tag.name != 'script':
                attr = 'href'

            if attr not in tag.attrMap:
                # FIXME: Figure out what to do w/ invalid tags. For now, we return False
                # and leave the document unmodified, which means that it'll probably fail to run.
                _log.error("Missing an attr in %s" % filename)
                return False

            old_path = tag[attr]
            new_tag = Tag(doc, tag.name, tag.attrs)
            new_tag[attr] = re.sub(pattern, resources_relpath + '/testharness', old_path)

            self.replace_tag(tag, new_tag)

        return True

    def convert_prefixed_properties(self, doc, filename):
        """ Searches a BeautifulSoup |doc| for any CSS properties requiring the -webkit- prefix and converts them.

        Returns the list of converted properties and the modified document as a string """

        converted_properties = []

        # Look for inline and document styles.
        inline_styles = doc.findAll(style=re.compile('.*'))
        style_tags = doc.findAll('style')
        all_styles = inline_styles + style_tags

        for tag in all_styles:

            # Get the text whether in a style tag or style attribute.
            style_text = ''
            if tag.name == 'style':
                if not tag.contents:
                    continue
                style_text = tag.contents[0]
            else:
                style_text = tag['style']

            updated_style_text = self.add_webkit_prefix_to_unprefixed_properties(style_text, filename)

            # Rewrite tag only if changes were made.
            if updated_style_text[0]:
                converted_properties.extend(updated_style_text[0])

                new_tag = Tag(doc, tag.name, tag.attrs)
                new_tag.insert(0, updated_style_text[1])

                self.replace_tag(tag, new_tag)

        return (converted_properties, doc.prettify())

    def add_webkit_prefix_to_unprefixed_properties(self, text, filename):
        """ Searches |text| for instances of properties requiring the -webkit- prefix and adds the prefix to them.

        Returns the list of converted properties and the modified text."""

        converted_properties = []

        for prefixed_property in self.prefixed_properties:
            # FIXME: add in both the prefixed and unprefixed versions, rather than just replacing them?
            # That might allow the imported test to work in other browsers more easily.

            unprefixed_property = prefixed_property.replace('-webkit-', '')

            # Look for the various ways it might be in the CSS
            # Match the property preceded by either whitespace or left curly brace
            # or at the beginning of the string (for inline style attribute)
            pattern = '([\s{]|^)' + unprefixed_property + '(\s+:|:)'
            if re.search(pattern, text):
                _log.info('converting %s -> %s' % (unprefixed_property, prefixed_property))
                converted_properties.append(prefixed_property)
                text = re.sub(pattern, prefixed_property + ':', text)

        # FIXME: Handle the JS versions of these properties and GetComputedStyle, too.
        return (converted_properties, text)

    def replace_tag(self, old_tag, new_tag):
        index = old_tag.parent.contents.index(old_tag)
        old_tag.parent.insert(index, new_tag)
        old_tag.extract()
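
To make the prefix rewriting concrete, a direct call on a small inline style string might behave as below; whether 'transition' is rewritten depends on the prefixed-property list read from CSSPropertyNames.in in the local checkout, so the expected output is an assumption:

converter = W3CTestConverter()
properties, text = converter.add_webkit_prefix_to_unprefixed_properties(
    'div { transition: opacity 1s; }', 'example.css')
# If '-webkit-transition' is in converter.prefixed_properties, this yields:
#   properties == ['-webkit-transition']
#   text == 'div {-webkit-transition: opacity 1s; }'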
Exemple #15
0
def parse_args(args):
    option_group_definitions = []

    option_group_definitions.append(("Platform options", platform_options()))
    option_group_definitions.append(
        ("Configuration options", configuration_options()))
    option_group_definitions.append(
        ("Printing Options", printing.print_options()))

    option_group_definitions.append(("Feature Switches", [
        optparse.make_option(
            "--complex-text",
            action="store_true",
            default=False,
            help=
            "Use the complex text code path for all text (OS X and Windows only)"
        ),
        optparse.make_option("--accelerated-drawing",
                             action="store_true",
                             default=False,
                             help="Use accelerated drawing (OS X only)"),
        optparse.make_option(
            "--remote-layer-tree",
            action="store_true",
            default=False,
            help="Use the remote layer tree drawing model (OS X WebKit2 only)"
        ),
        optparse.make_option(
            "--internal-feature",
            type="string",
            action="append",
            default=[],
            help=
            "Enable (disable) an internal feature (--internal-feature FeatureName[=true|false])"
        ),
        optparse.make_option(
            "--experimental-feature",
            type="string",
            action="append",
            default=[],
            help=
            "Enable (disable) an experimental feature (--experimental-feature FeatureName[=true|false])"
        ),
    ]))

    option_group_definitions.append((
        "WebKit Options",
        [
            optparse.make_option(
                "--gc-between-tests",
                action="store_true",
                default=False,
                help="Force garbage collection between each test"),
            optparse.make_option(
                "-l",
                "--leaks",
                action="store_true",
                default=False,
                help="Enable leaks checking (OS X and Gtk+ only)"),
            optparse.make_option("-g",
                                 "--guard-malloc",
                                 action="store_true",
                                 default=False,
                                 help="Enable Guard Malloc (OS X only)"),
            optparse.make_option(
                "--threaded",
                action="store_true",
                default=False,
                help="Run a concurrent JavaScript thread with each test"),
            optparse.make_option(
                "--dump-render-tree",
                "-1",
                action="store_false",
                default=True,
                dest="webkit_test_runner",
                help="Use DumpRenderTree rather than WebKitTestRunner."),
            # FIXME: We should merge this w/ --build-directory and only have one flag.
            optparse.make_option(
                "--root",
                action="store",
                help=
                "Path to a directory containing the executables needed to run tests."
            ),
        ]))

    option_group_definitions.append((
        "Results Options",
        [
            optparse.make_option("-p",
                                 "--pixel",
                                 "--pixel-tests",
                                 action="store_true",
                                 dest="pixel_tests",
                                 help="Enable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-pixel",
                "--no-pixel-tests",
                action="store_false",
                dest="pixel_tests",
                help="Disable pixel-to-pixel PNG comparisons"),
            optparse.make_option(
                "--no-sample-on-timeout",
                action="store_false",
                default=True,
                dest="sample_on_timeout",
                help="Don't run sample on timeout (OS X only)"),
            optparse.make_option("--no-ref-tests",
                                 action="store_true",
                                 dest="no_ref_tests",
                                 help="Skip all ref tests"),
            optparse.make_option(
                "--ignore-render-tree-dump-results",
                action="store_true",
                dest="ignore_render_tree_dump_results",
                help=
                "Don't compare or save results for render tree dump tests (they still run and crashes are reported)"
            ),
            optparse.make_option(
                "--tolerance",
                help="Ignore image differences less than this percentage (some "
                "ports may ignore this option)",
                type="float"),
            optparse.make_option("--results-directory",
                                 help="Location of test results"),
            optparse.make_option(
                "--build-directory",
                help=
                "Path to the directory under which build files are kept (should not include configuration)"
            ),
            optparse.make_option(
                "--add-platform-exceptions",
                action="store_true",
                default=False,
                help=
                "Save generated results into the *most-specific-platform* directory rather than the *generic-platform* directory"
            ),
            optparse.make_option(
                "--new-baseline",
                action="store_true",
                default=False,
                help="Save generated results as new baselines "
                "into the *most-specific-platform* directory, overwriting whatever's "
                "already there. Equivalent to --reset-results --add-platform-exceptions"
            ),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                default=False,
                help="Reset expectations to the "
                "generated results in their existing location."),
            optparse.make_option(
                "--no-new-test-results",
                action="store_false",
                dest="new_test_results",
                default=True,
                help="Don't create new baselines when no expected results exist"
            ),
            optparse.make_option(
                "--treat-ref-tests-as-pixel-tests",
                action="store_true",
                default=False,
                help=
                "Run ref tests, but treat them as if they were traditional pixel tests"
            ),

            # FIXME: we should support a comma-separated list with --pixel-test-directory as well.
            optparse.make_option(
                "--pixel-test-directory",
                action="append",
                default=[],
                dest="pixel_test_directories",
                help=
                "A directory where it is allowed to execute tests as pixel tests. "
                "Specify multiple times to add multiple directories. "
                "This option implies --pixel-tests. If specified, only those tests "
                "will be executed as pixel tests that are located in one of the "
                "directories enumerated with the option. Some ports may ignore this "
                "option while others can have a default value that can be overridden here."
            ),
            optparse.make_option(
                "--skip-failing-tests",
                action="store_true",
                default=False,
                help="Skip tests that are marked as failing or flaky. "
                "Note: When using this option, you might miss new crashes "
                "in these tests."),
            optparse.make_option(
                "--additional-drt-flag",
                action="append",
                default=[],
                help="Additional command line flag to pass to DumpRenderTree "
                "Specify multiple times to add multiple flags."),
            optparse.make_option(
                "--driver-name",
                type="string",
                help="Alternative DumpRenderTree binary to use"),
            optparse.make_option(
                "--additional-platform-directory",
                action="append",
                default=[],
                help="Additional directory where to look for test "
                "baselines (will take precendence over platform baselines). "
                "Specify multiple times to add multiple search path entries."),
            optparse.make_option(
                "--additional-expectations",
                action="append",
                default=[],
                help=
                "Path to a test_expectations file that will override previous expectations. "
                "Specify multiple times for multiple sets of overrides."),
            optparse.make_option(
                "--compare-port",
                action="store",
                default=None,
                help="Use the specified port's baselines first"),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help="Don't launch a browser with results after the tests "
                "are done"),
            optparse.make_option(
                "--full-results-html",
                action="store_true",
                default=False,
                help=
                "Show all failures in results.html, rather than only regressions"
            ),
            optparse.make_option(
                "--clobber-old-results",
                action="store_true",
                default=False,
                help="Clobbers test results from previous runs."),
            optparse.make_option(
                "--http",
                action="store_true",
                dest="http",
                default=True,
                help="Run HTTP and WebSocket tests (default)"),
            optparse.make_option("--no-http",
                                 action="store_false",
                                 dest="http",
                                 help="Don't run HTTP and WebSocket tests"),
            optparse.make_option("--no-http-servers",
                                 action="store_false",
                                 dest="start_http_servers_if_needed",
                                 default=True,
                                 help="Don't start HTTP servers"),
            optparse.make_option(
                "--ignore-metrics",
                action="store_true",
                dest="ignore_metrics",
                default=False,
                help="Ignore rendering metrics related information from test "
                "output, only compare the structure of the rendertree."),
            optparse.make_option(
                "--nocheck-sys-deps",
                action="store_true",
                default=False,
                help="Don't check the system dependencies (themes)"),
            optparse.make_option("--java",
                                 action="store_true",
                                 default=False,
                                 help="Build java support files"),
            optparse.make_option(
                "--layout-tests-directory",
                action="store",
                default=None,
                help="Override the default layout test directory.",
                dest="layout_tests_dir")
        ]))

    option_group_definitions.append(("Testing Options", [
        optparse.make_option("--build", dest="build",
            action="store_true", default=True,
            help="Check to ensure the DumpRenderTree build is up-to-date "
                 "(default)."),
        optparse.make_option("--no-build", dest="build",
            action="store_false", help="Don't check to see if the "
                                       "DumpRenderTree build is up-to-date."),
        optparse.make_option("-n", "--dry-run", action="store_true",
            default=False,
            help="Do everything but actually run the tests or upload results."),
        optparse.make_option("--wrapper",
            help="wrapper command to insert before invocations of "
                 "DumpRenderTree or WebKitTestRunner; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
        optparse.make_option("-i", "--ignore-tests", action="append", default=[],
            help="directories or test to ignore (may specify multiple times)"),
        optparse.make_option("--test-list", action="append",
            help="read list of tests to run from file", metavar="FILE"),
        optparse.make_option("--skipped", action="store", default="default",
            help=("control how tests marked SKIP are run. "
                 "'default' == Skip tests unless explicitly listed on the command line, "
                 "'ignore' == Run them anyway, "
                 "'only' == only run the SKIP tests, "
                 "'always' == always skip, even if listed on the command line.")),
        optparse.make_option("--force", action="store_true", default=False,
            help="Run all tests with PASS as expected result, even those marked SKIP in the test list or " + \
                 "those which are device-specific (implies --skipped=ignore)"),
        optparse.make_option("--time-out-ms", "--timeout",
            help="Set the timeout for each test in milliseconds"),
        optparse.make_option("--order", action="store", default="natural",
            help=("determine the order in which the test cases will be run. "
                  "'none' == use the order in which the tests were listed either in arguments or test list, "
                  "'natural' == use the natural order (default), "
                  "'random' == randomize the test order.")),
        optparse.make_option("--run-chunk",
            help=("Run a specified chunk (n:l), the nth of len l, "
                 "of the layout tests")),
        optparse.make_option("--run-part", help=("Run a specified part (n:m), "
                  "the nth of m parts, of the layout tests")),
        optparse.make_option("--batch-size",
            help=("Run a the tests in batches (n), after every n tests, "
                  "DumpRenderTree is relaunched."), type="int", default=None),
        optparse.make_option("--run-singly", action="store_true",
            default=False, help="run a separate DumpRenderTree for each test (implies --verbose)"),
        optparse.make_option("--child-processes",
            help="Number of DumpRenderTrees to run in parallel."),
        # FIXME: Display default number of child processes that will run.
        optparse.make_option("-f", "--fully-parallel", action="store_true",
            help="run all tests in parallel"),
        optparse.make_option("--exit-after-n-failures", type="int", default=None,
            help="Exit after the first N failures instead of running all "
            "tests"),
        optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
            default=None, help="Exit after the first N crashes instead of "
            "running all tests"),
        optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
        optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
        optparse.make_option("--retry-failures", action="store_true",
            default=True,
            help="Re-try any tests that produce unexpected results (default)"),
        optparse.make_option("--no-retry-failures", action="store_false",
            dest="retry_failures",
            help="Don't re-try any tests that produce unexpected results."),
        optparse.make_option("--max-locked-shards", type="int", default=0,
            help="Set the maximum number of locked shards"),
        optparse.make_option("--additional-env-var", type="string", action="append", default=[],
            help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
        optparse.make_option("--profile", action="store_true",
            help="Output per-test profile information."),
        optparse.make_option("--profiler", action="store",
            help="Output per-test profile information, using the specified profiler."),
        optparse.make_option("--no-timeout", action="store_true", default=False, help="Disable test timeouts"),
        optparse.make_option('--display-server', choices=['xvfb', 'xorg', 'weston', 'wayland'], default='xvfb',
            help='"xvfb": Use a virtualized X11 server. "xorg": Use the current X11 session. '
                 '"weston": Use a virtualized Weston server. "wayland": Use the current wayland session.'),
        optparse.make_option("--world-leaks", action="store_true", default=False, help="Check for world leaks (currently, only documents). Differs from --leaks in that this uses internal instrumentation, rather than external tools."),
        optparse.make_option("--accessibility-isolated-tree", action="store_true", default=False, help="Runs tests in accessibility isolated tree mode."),
    ]))

    option_group_definitions.append(("iOS Options", [
        optparse.make_option(
            '--no-install',
            action='store_const',
            const=False,
            default=True,
            dest='install',
            help='Skip install step for device and simulator testing'),
        optparse.make_option(
            '--version',
            help=
            'Specify the version of iOS to be used. By default, this will adopt the runtime for iOS Simulator.'
        ),
        optparse.make_option(
            '--device-type',
            help=
            'iOS Simulator device type identifier (default: i386 -> iPhone 5, x86_64 -> iPhone SE)'
        ),
        optparse.make_option(
            '--dedicated-simulators',
            action="store_true",
            default=False,
            help=
            "If set, dedicated iOS simulators will always be created.  If not set, the script will attempt to use any currently running simulator."
        ),
        optparse.make_option(
            '--show-touches',
            action="store_true",
            default=False,
            help=
            "If set, a small dot will be shown where the generated touches are. Helpful for debugging touch tests."
        ),
    ]))

    option_group_definitions.append(("Miscellaneous Options", [
        optparse.make_option(
            "--lint-test-files",
            action="store_true",
            default=False,
            help=
            ("Makes sure the test files parse for all configurations. Does not run any tests."
             )),
        optparse.make_option(
            "--print-expectations",
            action="store_true",
            default=False,
            help=
            ("Print the expected outcome for the given test, or all tests listed in TestExpectations. Does not run any tests."
             )),
        optparse.make_option(
            "--webgl-test-suite",
            action="store_true",
            default=False,
            help=
            ("Run exhaustive webgl list, including test ordinarily skipped for performance reasons. Equivalent to '--additional-expectations=LayoutTests/webgl/TestExpectations webgl'"
             )),
        optparse.make_option(
            "--use-gpu-process",
            action="store_true",
            default=False,
            help=
            ("Enable all GPU process related features, also set additional expectations and the result report flavor."
             )),
        optparse.make_option(
            "--prefer-integrated-gpu",
            action="store_true",
            default=False,
            help=
            ("Prefer using the lower-power integrated GPU on a dual-GPU system. Note that other running applications and the tests themselves can override this request."
             )),
    ]))

    option_group_definitions.append(("Web Platform Test Server Options", [
        optparse.make_option(
            "--wptserver-doc-root",
            type="string",
            help=
            ("Set web platform server document root, relative to LayoutTests directory"
             )),
    ]))

    # FIXME: Remove this group once the old results dashboards are deprecated.
    option_group_definitions.append(("Legacy Result Options", [
        optparse.make_option("--master-name",
                             help="The name of the buildbot master."),
        optparse.make_option(
            "--build-name",
            default="DUMMY_BUILD_NAME",
            help=(
                "The name of the builder used in its path, e.g. webkit-rel.")),
        optparse.make_option(
            "--build-slave",
            default="DUMMY_BUILD_SLAVE",
            help=("The name of the worker used. e.g. apple-macpro-6.")),
        optparse.make_option(
            "--test-results-server",
            action="append",
            default=[],
            help=
            ("If specified, upload results json files to this appengine server."
             )),
        optparse.make_option(
            "--results-server-host",
            action="append",
            default=[],
            help=(
                "If specified, upload results JSON file to this results server."
            )),
        optparse.make_option(
            "--additional-repository-name",
            help=("The name of an additional subversion or git checkout")),
        optparse.make_option(
            "--additional-repository-path",
            help=
            ("The path to an additional subversion or git checkout (requires --additional-repository-name)"
             )),
        optparse.make_option(
            "--allowed-host",
            type="string",
            action="append",
            default=[],
            help=
            ("If specified, tests are allowed to make requests to the specified hostname."
             ))
    ]))

    option_group_definitions.append(('Upload Options', upload_options()))

    option_parser = optparse.OptionParser(usage="%prog [options] [<path>...]")

    for group_name, group_options in option_group_definitions:
        option_group = optparse.OptionGroup(option_parser, group_name)
        option_group.add_options(group_options)
        option_parser.add_option_group(option_group)

    options, args = option_parser.parse_args(args)
    if options.webgl_test_suite:
        if not args:
            args.append('webgl')
        host = Host()
        host.initialize_scm()
        options.additional_expectations.insert(
            0,
            host.filesystem.join(host.scm().checkout_root,
                                 'LayoutTests/webgl/TestExpectations'))

    if options.use_gpu_process:
        host = Host()
        host.initialize_scm()
        options.additional_expectations.insert(
            0,
            host.filesystem.join(host.scm().checkout_root,
                                 'LayoutTests/gpu-process/TestExpectations'))
        if not options.internal_feature:
            options.internal_feature = []
        options.internal_feature.append('UseGPUProcessForMediaEnabled')
        options.internal_feature.append('CaptureAudioInGPUProcessEnabled')
        options.internal_feature.append('CaptureVideoInGPUProcessEnabled')
        options.internal_feature.append(
            'UseGPUProcessForCanvasRenderingEnabled')
        options.internal_feature.append('UseGPUProcessForDOMRenderingEnabled')
        options.internal_feature.append('UseGPUProcessForWebGLEnabled')
        if not options.experimental_feature:
            options.experimental_feature = []
        options.experimental_feature.append(
            'WebRTCPlatformCodecsInGPUProcessEnabled')
        if options.result_report_flavor:
            raise RuntimeError(
                '--use-gpu-process implicitly sets the result flavor, this should not be overridden'
            )
        options.result_report_flavor = 'gpuprocess'

    return options, args
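
A minimal usage sketch of the option parsing above, assuming the enclosing function is exposed as parse_args() (a hypothetical name; the real entry point may differ) and is called from inside a WebKit checkout. The flags come from the definitions above; the 'fast/css' test path is illustrative.

# Parse a command-line-style argument list; optparse fills in the declared defaults.
options, args = parse_args(['--iterations', '3', '--no-show-results', 'fast/css'])
print options.iterations    # 3 (parsed with type="int")
print options.show_results  # False (--no-show-results stores False into show_results)
print args                  # ['fast/css']
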
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform,
                                                     self._options)
        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)

        perf_option_list = [
            optparse.make_option('--debug',
                                 action='store_const',
                                 const='Debug',
                                 dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release',
                                 action='store_const',
                                 const='Release',
                                 dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option(
                "--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--chromium",
                                 action="store_const",
                                 const='chromium',
                                 dest='platform',
                                 help='Alias for --platform=chromium'),
            optparse.make_option("--chromium-android",
                                 action="store_const",
                                 const='chromium-android',
                                 dest='platform',
                                 help='Alias for --platform=chromium-android'),
            optparse.make_option(
                "--builder-name",
                help=
                ("The name of the builder shown on the waterfall running this script e.g. google-mac-2."
                 )),
            optparse.make_option(
                "--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help=
                "Check to ensure the DumpRenderTree build is up-to-date (default)."
            ),
            optparse.make_option(
                "--no-build",
                dest="build",
                action="store_false",
                help=
                "Don't check to see if the DumpRenderTree build is up-to-date."
            ),
            optparse.make_option(
                "--build-directory",
                help=
                "Path to the directory under which build files are kept (should not include configuration)"
            ),
            optparse.make_option("--time-out-ms",
                                 default=600 * 1000,
                                 help="Set the timeout for each test"),
            optparse.make_option(
                "--no-results",
                action="store_false",
                dest="generate_results",
                default=True,
                help="Do no generate results JSON and results page."),
            optparse.make_option(
                "--output-json-path",
                action='callback',
                callback=_expand_path,
                type="str",
                help=
                "Path to generate a JSON file at; may contain previous results if it already exists."
            ),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                help=
                "Clears the content in the generated JSON file before adding the results."
            ),
            optparse.make_option(
                "--slave-config-json-path",
                action='callback',
                callback=_expand_path,
                type="str",
                help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option(
                "--description",
                help=
                "Add a description to the output JSON file if one is generated"
            ),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help=
                "Don't launch a browser with results after the tests are done"
            ),
            optparse.make_option(
                "--test-results-server",
                help=
                "Upload the generated JSON file to the specified server when --output-json-path is present."
            ),
            optparse.make_option("--replay",
                                 dest="replay",
                                 action="store_true",
                                 default=False,
                                 help="Run replay tests."),
            optparse.make_option(
                "--force",
                dest="use_skipped_list",
                action="store_false",
                default=True,
                help="Run all tests, including the ones in the Skipped list."),
            optparse.make_option("--profile",
                                 action="store_true",
                                 help="Output per-test profile information."),
            optparse.make_option(
                "--profiler",
                action="store",
                help=
                "Output per-test profile information, using the specified profiler."
            ),
            optparse.make_option(
                "--additional-drt-flag",
                action="append",
                default=[],
                help="Additional command line flag to pass to DumpRenderTree "
                "Specify multiple times to add multiple flags."),
            optparse.make_option(
                "--driver-name",
                type="string",
                help="Alternative DumpRenderTree binary to use"),
            optparse.make_option(
                "--content-shell",
                action="store_true",
                help="Use Content Shell instead of DumpRenderTree"),
            optparse.make_option(
                "--repeat",
                default=1,
                type="int",
                help="Specify number of times to run test set (default: 1)."),
            optparse.make_option(
                "--test-runner-count",
                default=DEFAULT_TEST_RUNNER_COUNT,
                type="int",
                help=
                "Specify number of times to invoke test runner for each performance test."
            ),
        ]
        return optparse.OptionParser(
            option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            if filesystem.exists(filesystem.join(self._base_path, arg)):
                paths.append(arg)
            else:
                relpath = filesystem.relpath(arg, self._base_path)
                if filesystem.exists(filesystem.join(self._base_path,
                                                     relpath)):
                    paths.append(filesystem.normpath(relpath))
                else:
                    _log.warn('Path was not found: ' + arg)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths,
                                     skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = filesystem.relpath(path, self._base_path).replace(
                '\\', '/')
            if self._options.use_skipped_list and self._port.skips_perf_test(
                    relative_path) and filesystem.normpath(
                        relative_path) not in paths:
                continue
            test = PerfTestFactory.create_perf_test(
                self._port,
                relative_path,
                path,
                test_runner_count=self._options.test_runner_count)
            tests.append(test)

        return tests

    def _start_http_servers(self):
        self._port.acquire_http_lock()
        self._port.start_http_server(number_of_servers=2)

    def _stop_http_servers(self):
        self._port.stop_http_server()
        self._port.release_http_lock()

    def run(self):
        needs_http = self._port.requires_http_server()

        if not self._port.check_build(needs_http=needs_http):
            _log.error("Build not up to date for %s" %
                       self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        run_count = 0
        repeat = self._options.repeat
        while (run_count < repeat):
            run_count += 1

            tests = self._collect_tests()
            runs = ' (Run %d of %d)' % (run_count,
                                        repeat) if repeat > 1 else ''
            _log.info("Running %d tests%s" % (len(tests), runs))

            for test in tests:
                if not test.prepare(self._options.time_out_ms):
                    return self.EXIT_CODE_BAD_PREPARATION

            try:
                if needs_http:
                    self._start_http_servers()
                unexpected = self._run_tests_set(
                    sorted(list(tests), key=lambda test: test.test_name()))

            finally:
                if needs_http:
                    self._stop_http_servers()

            if self._options.generate_results and not self._options.profile:
                exit_code = self._generate_results()
                if exit_code:
                    return exit_code

        if self._options.generate_results and not self._options.profile:
            test_results_server = self._options.test_results_server
            if test_results_server and not self._upload_json(
                    test_results_server, self._output_json_path()):
                return self.EXIT_CODE_FAILED_UPLOADING

            if self._options.show_results:
                self._port.show_results_html_file(self._results_page_path())

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(),
                                          self._DEFAULT_JSON_FILENAME)

    def _results_page_path(self):
        return self._host.filesystem.splitext(
            self._output_json_path())[0] + '.html'

    def _generate_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp,
                                             options.description,
                                             options.platform,
                                             options.builder_name,
                                             options.build_number)

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(
                options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        filesystem = self._host.filesystem
        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        template_path = filesystem.join(self._port.perf_tests_dir(),
                                        'resources/results-template.html')
        template = filesystem.read_text_file(template_path)

        absolute_path_to_trunk = filesystem.dirname(
            self._port.perf_tests_dir())
        results_page = template.replace('%AbsolutePathToWebKitTrunk%',
                                        absolute_path_to_trunk)
        results_page = results_page.replace('%PeformanceTestsResultsJSON%',
                                            json_output)

        filesystem.write_text_file(self._results_page_path(), results_page)

    def _generate_results_dict(self, timestamp, description, platform,
                               builder_name, build_number):
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive
                              ).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {
                'revision': revision,
                'timestamp': scm.timestamp_of_revision(path, revision)
            }

        meta_info = {
            'description':
            description,
            'buildTime':
            self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform':
            platform,
            'revisions':
            revisions,
            'builderName':
            builder_name,
            'buildNumber':
            int(build_number) if build_number else None
        }

        contents = {'tests': {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            for metric_name, iteration_values in metrics.iteritems():
                if not isinstance(
                        iteration_values, list
                ):  # We can't report results without individual measurements.
                    continue

                tests = contents['tests']
                path = test.test_name_without_file_extension().split('/')
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = view_source_url('PerformanceTests/' +
                                          (test.test_name() if is_last_token
                                           else '/'.join(path[0:i + 1])))
                    tests.setdefault(path[i], {'url': url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        current_test.setdefault('metrics', {})
                        assert metric_name not in current_test['metrics']
                        current_test['metrics'][metric_name] = {
                            'current': iteration_values
                        }
                    else:
                        current_test.setdefault('tests', {})
                        tests = current_test['tests']

        return contents
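
        # Illustrative shape of the dictionary built above (the test names, metric
        # name and values are made up; top-level metadata keys are added only when
        # they are truthy):
        #
        #   {
        #       'buildTime': '2013-01-01T00:00:00.000000',
        #       'platform': 'mac',
        #       'revisions': {...},
        #       'tests': {
        #           'Bindings': {
        #               'url': view_source_url('PerformanceTests/Bindings'),
        #               'tests': {
        #                   'event-target-wrapper': {
        #                       'url': view_source_url('PerformanceTests/Bindings/event-target-wrapper.html'),
        #                       'metrics': {'Time': {'current': [1.0, 2.0, 3.0]}}
        #                   }
        #               }
        #           }
        #       }
        #   }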

    @staticmethod
    def _datetime_in_ES5_compatible_iso_format(datetime):
        return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')

    def _merge_slave_config_json(self, slave_config_json_path, contents):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" %
                       slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(
                slave_config_json_path)
            slave_config = json.load(slave_config_json)
            for key in slave_config:
                contents['builder' + key.capitalize()] = slave_config[key]
            return contents
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" %
                       (slave_config_json_path, error))
        return None
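
A minimal sketch of driving the PerfTestsRunner defined above, assuming it is run from inside a WebKit checkout (Host() and the port factory need one to locate the SCM root and the PerformanceTests directory). The platform value and the 'Bindings' test path are illustrative.

import sys

# Build the runner from command-line-style arguments and run the collected tests.
# run() returns the value produced by _run_tests_set() for unexpected results, or
# one of the negative EXIT_CODE_* constants defined on the class if setup fails.
runner = PerfTestsRunner(args=['--platform', 'mac', '--repeat', '2', 'Bindings'])
sys.exit(runner.run())
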
class PerfTestsRunner(object):
    _default_branch = "webkit-trunk"
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = "PerformanceTestsResults.json"

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)

        perf_option_list = [
            optparse.make_option(
                "--debug",
                action="store_const",
                const="Debug",
                dest="configuration",
                help="Set the configuration to Debug",
            ),
            optparse.make_option(
                "--release",
                action="store_const",
                const="Release",
                dest="configuration",
                help="Set the configuration to Release",
            ),
            optparse.make_option("--platform", help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option(
                "--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2."),
            ),
            optparse.make_option("--build-number", help=("The build number of the builder running this script.")),
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default).",
            ),
            optparse.make_option(
                "--no-build",
                dest="build",
                action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date.",
            ),
            optparse.make_option(
                "--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)",
            ),
            optparse.make_option("--time-out-ms", default=600 * 1000, help="Set the timeout for each test"),
            optparse.make_option(
                "--no-results",
                action="store_false",
                dest="generate_results",
                default=True,
                help="Do no generate results JSON and results page.",
            ),
            optparse.make_option(
                "--output-json-path",
                action="callback",
                callback=_expand_path,
                type="str",
                help="Path to generate a JSON file at; may contain previous results if it already exists.",
            ),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                help="Clears the content in the generated JSON file before adding the results.",
            ),
            optparse.make_option(
                "--slave-config-json-path",
                action="callback",
                callback=_expand_path,
                type="str",
                help="Only used on bots. Path to a slave configuration file.",
            ),
            optparse.make_option("--description", help="Add a description to the output JSON file if one is generated"),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help="Don't launch a browser with results after the tests are done",
            ),
            optparse.make_option(
                "--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present.",
            ),
            optparse.make_option(
                "--webkit-test-runner",
                "-2",
                action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree.",
            ),
            optparse.make_option(
                "--replay", dest="replay", action="store_true", default=False, help="Run replay tests."
            ),
            optparse.make_option(
                "--force",
                dest="use_skipped_list",
                action="store_false",
                default=True,
                help="Run all tests, including the ones in the Skipped list.",
            ),
            optparse.make_option("--profile", action="store_true", help="Output per-test profile information."),
            optparse.make_option(
                "--profiler", action="store", help="Output per-test profile information, using the specified profiler."
            ),
            optparse.make_option(
                "--additional-drt-flag",
                action="append",
                default=[],
                help="Additional command line flag to pass to DumpRenderTree "
                "Specify multiple times to add multiple flags.",
            ),
            optparse.make_option("--driver-name", type="string", help="Alternative DumpRenderTree binary to use"),
            optparse.make_option(
                "--repeat", default=1, type="int", help="Specify number of times to run test set (default: 1)."
            ),
            optparse.make_option(
                "--test-runner-count",
                default=DEFAULT_TEST_RUNNER_COUNT,
                type="int",
                help="Specify number of times to invoke test runner for each performance test.",
            ),
        ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        test_extensions = [".html", ".svg"]
        if self._options.replay:
            test_extensions.append(".replay")

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            if filesystem.exists(filesystem.join(self._base_path, arg)):
                paths.append(arg)
            else:
                relpath = filesystem.relpath(arg, self._base_path)
                if filesystem.exists(filesystem.join(self._base_path, relpath)):
                    paths.append(filesystem.normpath(relpath))
                else:
                    _log.warn("Path was not found:" + arg)

        skipped_directories = set([".svn", "resources"])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = filesystem.relpath(path, self._base_path).replace("\\", "/")
            if (
                self._options.use_skipped_list
                and self._port.skips_perf_test(relative_path)
                and filesystem.normpath(relative_path) not in paths
            ):
                continue
            test = PerfTestFactory.create_perf_test(
                self._port, relative_path, path, test_runner_count=self._options.test_runner_count
            )
            tests.append(test)

        return tests

    def _start_http_servers(self):
        self._port.acquire_http_lock()
        self._port.start_http_server(number_of_servers=2)

    def _stop_http_servers(self):
        self._port.stop_http_server()
        self._port.release_http_lock()

    def run(self):
        needs_http = self._port.requires_http_server()

        if not self._port.check_build(needs_http=needs_http):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        run_count = 0
        repeat = self._options.repeat
        while run_count < repeat:
            run_count += 1

            tests = self._collect_tests()
            runs = " (Run %d of %d)" % (run_count, repeat) if repeat > 1 else ""
            _log.info("Running %d tests%s" % (len(tests), runs))

            for test in tests:
                if not test.prepare(self._options.time_out_ms):
                    return self.EXIT_CODE_BAD_PREPARATION

            try:
                if needs_http:
                    self._start_http_servers()
                unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))

            finally:
                if needs_http:
                    self._stop_http_servers()

            if self._options.generate_results and not self._options.profile:
                exit_code = self._generate_results()
                if exit_code:
                    return exit_code

        if self._options.generate_results and not self._options.profile:
            test_results_server = self._options.test_results_server
            if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
                return self.EXIT_CODE_FAILED_UPLOADING

            if self._options.show_results:
                self._port.show_results_html_file(self._results_page_path())

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _results_page_path(self):
        return self._host.filesystem.splitext(self._output_json_path())[0] + ".html"

    def _generate_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(
            self._timestamp, options.description, options.platform, options.builder_name, options.build_number
        )

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        filesystem = self._host.filesystem
        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        template_path = filesystem.join(self._port.perf_tests_dir(), "resources/results-template.html")
        template = filesystem.read_text_file(template_path)

        absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
        results_page = template.replace("%AbsolutePathToWebKitTrunk%", absolute_path_to_trunk)
        results_page = results_page.replace("%PeformanceTestsResultsJSON%", json_output)

        filesystem.write_text_file(self._results_page_path(), results_page)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {"revision": revision, "timestamp": scm.timestamp_of_revision(path, revision)}

        meta_info = {
            "description": description,
            "buildTime": self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            "platform": platform,
            "revisions": revisions,
            "builderName": builder_name,
            "buildNumber": int(build_number) if build_number else None,
        }

        contents = {"tests": {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            for metric_name, iteration_values in metrics.iteritems():
                if not isinstance(iteration_values, list):  # We can't report results without individual measurements.
                    continue

                tests = contents["tests"]
                path = test.test_name_without_file_extension().split("/")
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = view_source_url(
                        "PerformanceTests/" + (test.test_name() if is_last_token else "/".join(path[0 : i + 1]))
                    )
                    tests.setdefault(path[i], {"url": url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        current_test.setdefault("metrics", {})
                        assert metric_name not in current_test["metrics"]
                        current_test["metrics"][metric_name] = {"current": iteration_values}
                    else:
                        current_test.setdefault("tests", {})
                        tests = current_test["tests"]

        return contents

    @staticmethod
    def _datetime_in_ES5_compatible_iso_format(datetime):
        return datetime.strftime("%Y-%m-%dT%H:%M:%S.%f")

    def _merge_slave_config_json(self, slave_config_json_path, contents):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            for key in slave_config:
                contents["builder" + key.capitalize()] = slave_config[key]
            return contents
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None
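
As a small reference for the slave-config merge above, a standalone sketch of the key transformation it applies; the configuration keys and values here are made up.

# Mirrors the loop in _merge_slave_config_json(): each key from the slave
# configuration JSON is capitalized and prefixed with "builder".
slave_config = {"id": "bot-1", "gpu": "intel"}   # illustrative file contents
contents = {"tests": {}}
for key in slave_config:
    contents["builder" + key.capitalize()] = slave_config[key]
print contents   # e.g. {'tests': {}, 'builderGpu': 'intel', 'builderId': 'bot-1'} (key order may vary)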