Code example #1
File: rebaseline_server.py (Project: mirror/chromium)
    def _prepare_config(self, options, args, tool):
        results_directory = args[0]
        host = Host()
        host.initialize_scm()

        print 'Parsing full_results.json...'
        results_json_path = host.filesystem.join(results_directory, 'full_results.json')
        results_json = json_results_generator.load_json(host.filesystem, results_json_path)

        port = tool.port_factory.get()
        layout_tests_directory = port.layout_tests_dir()
        platforms = host.filesystem.listdir(host.filesystem.join(layout_tests_directory, 'platform'))
        self._test_config = TestConfig(port, layout_tests_directory, results_directory, platforms, host)

        print 'Gathering current baselines...'
        self._gather_baselines(results_json)

        return {
            'test_config': self._test_config,
            "results_json": results_json,
            "platforms_json": {
                'platforms': platforms,
                'defaultPlatform': port.name(),
            },
        }
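All of the examples on this page revolve around webkitpy's Host object, which bundles the filesystem, executive, SCM wrapper and port factory used throughout the snippets. The sketch below is not taken from any of the projects above; it is a minimal illustration of that shared pattern, assuming a WebKit/Chromium checkout where webkitpy is importable.

from webkitpy.common.host import Host

host = Host()                           # real filesystem, executive, SCM and port factory
host.initialize_scm()                   # only needed before host.scm() is used
port = host.port_factory.get()          # default port for the current platform
results_dir = port.results_directory()
stats_path = host.filesystem.join(results_dir, 'stats.json')
if host.filesystem.exists(stats_path):
    print(host.filesystem.read_text_file(stats_path))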
Code example #2
    def __init__(self, path):
        MultiCommandTool.__init__(self)
        Host.__init__(self)
        self._path = path
        self.status_server = StatusServer()

        self.wakeup_event = threading.Event()
        self._deprecated_port = None
Code example #3
def main(argv, stderr, host=None):
    parser = optparse.OptionParser(option_list=platform_options(
        use_globs=True))
    parser.add_option('--json', help='Path to JSON output file')
    options, _ = parser.parse_args(argv)

    if not host:
        if options.platform and 'test' in options.platform:
            # It's a bit lame to import mocks into real code, but this allows the user
            # to run tests against the test platform interactively, which is useful for
            # debugging test failures.
            from webkitpy.common.host_mock import MockHost
            host = MockHost()
        else:
            host = Host()

    # Need to generate MANIFEST.json since some expectations correspond to WPT
    # tests that aren't files and only exist in the manifest.
    _log.info('Generating MANIFEST.json for web-platform-tests ...')
    WPTManifest.ensure_manifest(host)

    try:
        exit_status = run_checks(host, options, stderr)
    except KeyboardInterrupt:
        exit_status = exit_codes.INTERRUPTED_EXIT_STATUS
    except Exception as error:  # pylint: disable=broad-except
        print >> stderr, '\n%s raised: %s' % (error.__class__.__name__, error)
        traceback.print_exc(file=stderr)
        exit_status = exit_codes.EXCEPTIONAL_EXIT_STATUS

    return exit_status
Code example #4
def convert_for_webkit(new_path, filename, reference_support_info,
                       host=Host()):
    """Converts a file's contents so the Blink layout test runner can run it.

    Args:
        new_path: Absolute path where file will be copied to in the Chromium repo.
        filename: Absolute path to where the file is.
        reference_support_info: Dict of information about a related reference HTML, if any.

    Returns:
        A pair of (list of modified CSS properties, modified text) if the file
        should be modified; None, if the file is not modified.
    """
    # Conversion is not necessary for any tests in wpt now; see http://crbug.com/654081.
    if re.search(r'[/\\]imported[/\\]wpt[/\\]', new_path):
        return None

    contents = host.filesystem.read_binary_file(filename)
    converter = _W3CTestConverter(new_path, filename, reference_support_info,
                                  host)
    if filename.endswith('.css'):
        return converter.add_webkit_prefix_to_unprefixed_properties(
            contents.decode('utf-8'))
    else:
        try:
            converter.feed(contents.decode('utf-8'))
        except UnicodeDecodeError:
            converter.feed(contents.decode('utf-16'))
        converter.close()
        return converter.output()
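Several signatures in this collection (examples #4, #8, #19, #26 and #32) use host=Host() as a default argument. Worth noting: a Python default is evaluated once, at function definition time, so a real Host is constructed even when every caller injects a mock. The standalone sketch below demonstrates that behaviour; ExpensiveHost is a stand-in for Host, not a webkitpy class.

class ExpensiveHost(object):
    instances = 0

    def __init__(self):
        ExpensiveHost.instances += 1

def eager(host=ExpensiveHost()):   # default built once, at definition time
    return host

def lazy(host=None):               # built only when a caller actually needs it
    return host if host is not None else ExpensiveHost()

print(ExpensiveHost.instances)     # 1 -- defining eager() already paid the cost
lazy(host=object())                # injected fake; no new ExpensiveHost created
print(ExpensiveHost.instances)     # still 1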
Code example #5
def main(_argv, _stdout, _stderr):
    options, args = parse_args()
    import_dir = args[0]
    if len(args) == 1:
        repo_dir_parts = []
        for part in import_dir.split(os.path.sep):
            if part in VALID_TEST_STATUSES:
                break
            else:
                repo_dir_parts.append(part)
        repo_dir = os.path.sep.join(repo_dir_parts)
    else:
        repo_dir = args[1]

    if not os.path.exists(import_dir):
        sys.exit('Source directory %s not found!' % import_dir)

    if not os.path.exists(repo_dir):
        sys.exit('Repository directory %s not found!' % repo_dir)
    if not repo_dir in import_dir:
        sys.exit('Repository directory %s must be a parent of %s' % (repo_dir, import_dir))

    configure_logging()

    test_importer = TestImporter(Host(), import_dir, repo_dir, options)
    test_importer.do_import()
Code example #6
def main(server_constructor, input_fn=None, argv=None, **kwargs):
    input_fn = input_fn or raw_input

    option_parser = optparse.OptionParser()
    option_parser.add_option('--output-dir', dest='output_dir',
                             default=None, help='output directory.')
    option_parser.add_option('-v', '--verbose', action='store_true')
    options, args = option_parser.parse_args(argv)

    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG if options.verbose else logging.INFO)

    host = Host()
    port_obj = host.port_factory.get()
    if not options.output_dir:
        options.output_dir = port_obj.default_results_directory()

    server = server_constructor(port_obj, options.output_dir, **kwargs)
    server.start()
    try:
        _ = input_fn('Hit any key to stop the server and exit.')
    except (KeyboardInterrupt, EOFError) as e:
        pass

    server.stop()
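Both cli_wrapper-style entry points (#6 above and #15 below) take an input_fn precisely so the blocking prompt can be replaced in tests. A hedged usage sketch, assuming the main() shown directly above is in scope and that server_constructor only needs the (port_obj, output_dir) signature used there:

class FakeServer(object):
    # Stand-in for whatever server class real callers pass; not a webkitpy class.
    def __init__(self, port_obj, output_dir):
        self.port_obj = port_obj
        self.output_dir = output_dir

    def start(self):
        print('serving from %s' % self.output_dir)

    def stop(self):
        print('stopped')

# Returning immediately from input_fn lets main() start and stop the server without
# waiting for a keypress, which is handy in unit tests.
main(FakeServer, input_fn=lambda prompt: '', argv=['--output-dir', '/tmp/out'])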
Code example #7
def main(argv, _, stderr):
    parser = optparse.OptionParser(option_list=platform_options(
        use_globs=True))
    parser.add_option('--json', help='Path to JSON output file')
    options, _ = parser.parse_args(argv)

    if options.platform and 'test' in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the user
        # to run tests against the test platform interactively, which is useful for
        # debugging test failures.
        from webkitpy.common.host_mock import MockHost
        host = MockHost()
    else:
        host = Host()

    try:
        exit_status = run_checks(host, options, stderr)
    except KeyboardInterrupt:
        exit_status = exit_codes.INTERRUPTED_EXIT_STATUS
    except Exception as error:  # pylint: disable=broad-except
        print >> stderr, '\n%s raised: %s' % (error.__class__.__name__, error)
        traceback.print_exc(file=stderr)
        exit_status = exit_codes.EXCEPTIONAL_EXIT_STATUS

    return exit_status
Code example #8
    def __init__(self, new_path, filename, host=Host()):
        HTMLParser.__init__(self)

        self._host = host
        self._filesystem = self._host.filesystem
        self._webkit_root = WebKitFinder(self._filesystem).webkit_base()

        self.converted_data = []
        self.converted_properties = []
        self.in_style_tag = False
        self.style_data = []
        self.filename = filename

        resources_path = self.path_from_webkit_root('LayoutTests', 'resources')
        resources_relpath = self._filesystem.relpath(resources_path, new_path)
        self.resources_relpath = resources_relpath

        # These settings might vary between WebKit and Blink
        self._css_property_file = self.path_from_webkit_root(
            'Source', 'core', 'css', 'CSSProperties.in')

        self.prefixed_properties = self.read_webkit_prefixed_css_property_list()
        prop_regex = '([\s{]|^)(' + "|".join(
            prop.replace('-webkit-', '')
            for prop in self.prefixed_properties) + ')(\s+:|:)'
        self.prop_re = re.compile(prop_regex)
Code example #9
def main(argv, stdout, stderr):
    options, args = parse_args(argv)
    host = Host()

    try:
        options.webkit_test_runner = True
        port = host.port_factory.get(options.platform, options)
    except NotImplementedError as e:
        print(str(e), file=stderr)
        return EXCEPTIONAL_EXIT_STATUS

    # Some platforms do not support API tests
    does_not_support_api_tests = ['ios-device']
    if port.operating_system() in does_not_support_api_tests:
        print('{} cannot run API tests'.format(port.operating_system()),
              file=stderr)
        return EXCEPTIONAL_EXIT_STATUS

    try:
        return run(port, options, args, stderr)
    except KeyboardInterrupt:
        return INTERRUPT_EXIT_STATUS
    except BaseException as e:
        if isinstance(e, Exception):
            print('\n%s raised: %s' % (e.__class__.__name__, str(e)),
                  file=stderr)
            traceback.print_exc(file=stderr)
        return EXCEPTIONAL_EXIT_STATUS
Code example #10
def main(argv, stdout, stderr):
    options, args = parse_args(argv)

    if options.platform and 'test' in options.platform and not 'browser_test' in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the user
        # to run tests against the test platform interactively, which is useful for
        # debugging test failures.
        from webkitpy.common.host_mock import MockHost
        host = MockHost()
    else:
        host = Host()

    try:
        port = host.port_factory.get(options.platform, options)
    except (NotImplementedError, ValueError) as error:
        # FIXME: is this the best way to handle unsupported port names?
        print >> stderr, str(error)
        return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS

    try:
        return run(port, options, args, stderr, stdout).exit_code

    # We need to still handle KeyboardInterrupt, at least for webkitpy unittest cases.
    except KeyboardInterrupt:
        return test_run_results.INTERRUPTED_EXIT_STATUS
    except test_run_results.TestRunException as error:
        print >> stderr, error.msg
        return error.code
    except BaseException as error:
        if isinstance(error, Exception):
            print >> stderr, '\n%s raised: %s' % (error.__class__.__name__,
                                                  error)
            traceback.print_exc(file=stderr)
        return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
Code example #11
 def __init__(self, args=None, port=None):
     self._options, self._args = PerfTestsRunner._parse_args(args)
     if port:
         self._port = port
         self._host = self._port.host
     else:
         self._host = Host()
         self._port = self._host.port_factory.get(self._options.platform,
                                                  self._options)
     self._host.initialize_scm()
     self._webkit_base_dir_len = len(self._port.webkit_base())
     self._base_path = self._port.perf_tests_dir()
     self._results = {}
     self._timestamp = time.time()
     self._needs_http = None
     self._has_http_lock = False
Code example #12
    def run(self):
        if not self.host:
            self.host = Host()
        if not self._running_inline:
            self._set_up_logging()

        worker = self._worker
        exception_msg = ""
        _log.debug("%s starting" % self.name)

        try:
            if hasattr(worker, 'start'):
                worker.start()
            while True:
                message = self._messages_to_worker.get()
                if message.from_user:
                    worker.handle(message.name, message.src, *message.args)
                    self._yield_to_manager()
                else:
                    assert message.name == 'stop', 'bad message %s' % repr(message)
                    break

            _log.debug("%s exiting" % self.name)
        except Queue.Empty:
            assert False, '%s: ran out of messages in worker queue.' % self.name
        except KeyboardInterrupt, e:
            self._raise(sys.exc_info())
Code example #13
    def _prepare_config(self, options, args, tool):
        results_directory = args[0]
        scm = self._tool.scm()
        host = Host()
        filesystem = host.filesystem

        print('Parsing full_results.json...')
        results_json_path = filesystem.join(results_directory,
                                            'full_results.json')
        results_json = json_results_generator.load_json(
            filesystem, results_json_path)

        port = tool.port_factory.get()
        layout_tests_directory = port.layout_tests_dir()
        platforms = filesystem.listdir(
            filesystem.join(layout_tests_directory, 'platform'))
        self._test_config = TestConfig(port, layout_tests_directory,
                                       results_directory, platforms, scm, host)

        print('Gathering current baselines...')
        self._gather_baselines(results_json)

        return {
            'test_config': self._test_config,
            "results_json": results_json,
            "platforms_json": {
                'platforms': platforms,
                'defaultPlatform': port.name(),
            },
        }
Code example #14
    def test_import_dir_with_no_tests_and_no_hg(self):
        # FIXME: Use MockHosts instead.
        host = Host()
        host.executive = MockExecutive2(exception=OSError())

        importer = TestImporter(host, None, optparse.Values({"overwrite": False}))
        importer.source_directory = importer.path_from_webkit_root("Tools", "Scripts", "webkitpy", "w3c")
        importer.destination_directory = tempfile.mkdtemp(prefix='csswg')

        oc = OutputCapture()
        oc.capture_output()
        try:
            importer.do_import()
        finally:
            oc.restore_output()
            shutil.rmtree(importer.destination_directory, ignore_errors=True)
Code example #15
File: cli_wrapper.py (Project: nwjs/chromium-iot)
def main(server_constructor, input_fn=None, argv=None, **kwargs):
    input_fn = input_fn or raw_input

    parser = argparse.ArgumentParser()
    parser.add_argument('--output-dir', type=str, default=None,
                        help='output directory, for log files etc.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='print more information, including port numbers')
    args = parser.parse_args(argv)

    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)

    host = Host()
    port_obj = host.port_factory.get()
    if not args.output_dir:
        args.output_dir = port_obj.default_results_directory()

    # Create the output directory if it doesn't already exist.
    port_obj.host.filesystem.maybe_make_directory(args.output_dir)

    server = server_constructor(port_obj, args.output_dir, **kwargs)
    server.start()
    try:
        _ = input_fn('Hit any key to stop the server and exit.')
    except (KeyboardInterrupt, EOFError) as e:
        pass

    server.stop()
Code example #16
    def test_import_dir_with_no_tests(self):
        # FIXME: Use MockHosts instead.
        host = Host()
        host.executive = MockExecutive2(exception=ScriptError("abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!"))

        importer = TestImporter(host, None, optparse.Values({"overwrite": False}))
        importer.source_directory = importer.path_from_webkit_root("Tools", "Scripts", "webkitpy", "w3c")
        importer.destination_directory = tempfile.mkdtemp(prefix='csswg')

        oc = OutputCapture()
        oc.capture_output()
        try:
            importer.do_import()
        finally:
            oc.restore_output()
            shutil.rmtree(importer.destination_directory, ignore_errors=True)
Code example #17
def main(_argv, _stdout, _stderr):
    options, test_paths = parse_args(_argv)

    configure_logging()

    test_importer = TestImporter(Host(), test_paths, options)
    test_importer.do_import()
Code example #18
def main(argv):
    parser = optparse.OptionParser(usage='%prog [stats.json]')
    parser.description = "Prints out lists of tests run on each worker as per the stats.json file."
    options, args = parser.parse_args(argv)

    if args and args[0]:
        stats_path = args[0]
    else:
        host = Host()
        stats_path = host.filesystem.join(
            host.port_factory.get().results_directory(), 'stats.json')

    with open(stats_path, 'r') as fp:
        stats_trie = json.load(fp)

    stats = convert_trie_to_flat_paths(stats_trie)
    stats_by_worker = {}
    for test_name, data in stats.items():
        worker = "worker/" + str(data["results"][0])
        if worker not in stats_by_worker:
            stats_by_worker[worker] = []
        test_number = data["results"][1]
        stats_by_worker[worker].append({
            "name": test_name,
            "number": test_number
        })

    for worker in sorted(stats_by_worker.keys()):
        print worker + ':'
        for test in sorted(stats_by_worker[worker],
                           key=lambda test: test["number"]):
            print test["name"]
        print
Code example #19
def convert_for_webkit(new_path,
                       filename,
                       reference_support_info,
                       host=Host(),
                       convert_test_harness_links=True,
                       webkit_test_runner_options=''):
    """ Converts a file's |contents| so it will function correctly in its |new_path| in Webkit.

    Returns the list of modified properties and the modified text if the file was modifed, None otherwise."""
    contents = host.filesystem.read_text_file(filename)

    # WebKit does not have a www test domain.
    contents = contents.replace('{{domains[www]}}', '{{hosts[alt][]}}')

    converter = _W3CTestConverter(new_path, filename, reference_support_info,
                                  host, convert_test_harness_links,
                                  webkit_test_runner_options)
    if filename.endswith('.css'):
        return converter.add_webkit_prefix_to_unprefixed_properties_and_values(
            contents)
    elif filename.endswith('.js'):
        return ([], [], contents)
    else:
        converter.feed(contents)
        converter.close()
        return converter.output()
Code example #20
    def test_generate_repaint_overlay_html(self):
        test_name = 'paint/invalidation/repaint-overlay/layers.html'
        host = Host()
        port = host.port_factory.get()
        layer_tree_file = port.expected_filename(test_name, '.txt')
        if not layer_tree_file or not host.filesystem.exists(layer_tree_file):
            # This can happen if the scripts are not in the standard blink directory.
            return

        layer_tree = str(host.filesystem.read_text_file(layer_tree_file))
        self.assertTrue(repaint_overlay.result_contains_repaint_rects(layer_tree))
        overlay_html = (
            '<!-- Generated by Tools/Scripts/test-webkitpy\n' +
            ' test case: TestRepaintOverlay.test_generate_repaint_overlay_html. -->\n' +
            repaint_overlay.generate_repaint_overlay_html(test_name, layer_tree, layer_tree))

        results_directory = port.results_directory()
        host.filesystem.maybe_make_directory(results_directory)
        actual_overlay_html_file = host.filesystem.join(results_directory, 'layers-overlay.html')
        host.filesystem.write_text_file(actual_overlay_html_file, overlay_html)

        overlay_html_file = port.abspath_for_test('paint/invalidation/repaint-overlay/layers-overlay.html')
        expected = host.filesystem.read_text_file(overlay_html_file)

        self.assertEquals(
            expected, overlay_html,
            'This failure is probably caused by changed repaint_overlay.py. '
            'Please examine the diffs:\n  diff %s %s\n'
            'If the diffs are valid, update the file:\n  cp %s %s\n'
            'then update layers-overlay-expected.html in the same directory if needed,'
            ' and commit the files together with the changed repaint_overlay.py.' %
            (overlay_html_file, actual_overlay_html_file, actual_overlay_html_file, overlay_html_file))
Code example #21
 def _begin_logging(self):
     _queue_log_path = self._delegate.queue_log_path()
     # We are using logging.getLogger("webkitpy") instead of _log since we want to capture all messages logged from webkitpy modules.
     self._log_handler = logutils.configure_logger_to_log_to_file(
         logging.getLogger("webkitpy"), _queue_log_path,
         Host().filesystem)
     self._queue_log = self._output_tee.add_log(_queue_log_path)
     self._work_log = None
Code example #22
    def __init__(self, port, options, tests=[]):
        self._options = options

        self._build_type = "Debug" if self._options.debug else "Release"
        common.set_build_types((self._build_type, ))
        self._port = Host().port_factory.get(port)
        self._driver = self._create_driver()

        self._programs_path = common.binary_build_path()
        expectations_file = os.path.join(common.top_level_path(), "Tools",
                                         "TestWebKitAPI", "glib",
                                         "TestExpectations.json")
        self._expectations = TestExpectations(self._port.name(),
                                              expectations_file,
                                              self._build_type)
        self._tests = self._get_tests(tests)
        self._disabled_tests = []
Code example #23
def main(_argv, _stdout, _stderr):
    options = parse_args(_argv)

    configure_logging()

    test_exporter = TestExporter(Host(), options)

    test_exporter.do_export()
Code example #24
def get_test_baselines(test_file, test_config):
    # FIXME: This seems like a hack. This only seems used to access the Port.expected_baselines logic.
    class AllPlatformsPort(Port):
        def __init__(self, host):
            super(AllPlatformsPort, self).__init__(host, 'mac')
            self._platforms_by_directory = dict([
                (self._webkit_baseline_path(p), p)
                for p in test_config.platforms
            ])

        def baseline_search_path(self):
            return self._platforms_by_directory.keys()

        def platform_from_directory(self, directory):
            return self._platforms_by_directory[directory]

    test_path = test_config.filesystem.join(test_config.layout_tests_directory,
                                            test_file)

    # FIXME: This should get the Host from the test_config to be mockable!
    host = Host()
    host.initialize_scm()
    host.filesystem = test_config.filesystem
    all_platforms_port = AllPlatformsPort(host)

    all_test_baselines = {}
    for baseline_extension in ('.txt', '.checksum', '.png'):
        test_baselines = test_config.test_port.expected_baselines(
            test_file, baseline_extension)
        baselines = all_platforms_port.expected_baselines(test_file,
                                                          baseline_extension,
                                                          all_baselines=True)
        for platform_directory, expected_filename in baselines:
            if not platform_directory:
                continue
            if platform_directory == test_config.layout_tests_directory:
                platform = 'base'
            else:
                platform = all_platforms_port.platform_from_directory(
                    platform_directory)
            platform_baselines = all_test_baselines.setdefault(platform, {})
            was_used_for_test = (platform_directory,
                                 expected_filename) in test_baselines
            platform_baselines[baseline_extension] = was_used_for_test

    return all_test_baselines
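The value returned by get_test_baselines is a nested dict: platform name, then baseline extension, then a flag saying whether that baseline was the one the test port actually used. The literal below only illustrates the shape; the platform names, extensions and booleans are invented.

all_test_baselines = {
    'base': {'.txt': True, '.png': False},
    'mac': {'.txt': False, '.png': True},
    'win': {'.checksum': False},
}
# True means the (platform_directory, expected_filename) pair also appears in
# test_config.test_port.expected_baselines() for that extension.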
Code example #25
def main():
    configure_logging()
    options = parse_args()
    host = Host()
    wpt_github = WPTGitHub(host)
    test_exporter = TestExporter(host, wpt_github, dry_run=options.dry_run)

    test_exporter.run()
Code example #26
 def do_association_check(self, files, cwd, host=Host()):
     _log.debug("Running TestExpectations linter")
     TestExpectationsChecker.lint_test_expectations(
         files,
         self._configuration,
         cwd,
         self._increment_error_count,
         host=host)
Code example #27
File: run_webkit_tests.py (Project: iStonesy/WebKit)
def main(argv, stdout, stderr):
    options, args = parse_args(argv)

    if options.platform and 'test' in options.platform:
        # It's a bit lame to import mocks into real code, but this allows the user
        # to run tests against the test platform interactively, which is useful for
        # debugging test failures.
        from webkitpy.common.host_mock import MockHost
        host = MockHost()
    else:
        host = Host()

    if options.lint_test_files:
        from webkitpy.layout_tests.lint_test_expectations import lint
        return lint(host, options, stderr)

    try:
        port = host.port_factory.get(options.platform, options)
    except NotImplementedError as e:
        # FIXME: is this the best way to handle unsupported port names?
        print(str(e), file=stderr)
        return EXCEPTIONAL_EXIT_STATUS

    stack_trace_path = host.filesystem.join(port.results_directory(),
                                            'python_stack_trace.txt')
    log_stack_trace_on_ctrl_c(output_file=stack_trace_path)
    log_stack_trace_on_term(output_file=stack_trace_path)

    if options.print_expectations:
        return _print_expectations(port, options, args, stderr)

    try:
        # Force all tests to use a smaller stack so that stack overflow tests can run faster.
        stackSizeInBytes = int(1.5 * 1024 * 1024)
        options.additional_env_var.append('JSC_maxPerThreadStackUsage=' +
                                          str(stackSizeInBytes))
        options.additional_env_var.append('__XPC_JSC_maxPerThreadStackUsage=' +
                                          str(stackSizeInBytes))
        options.additional_env_var.append('JSC_useSharedArrayBuffer=1')
        options.additional_env_var.append('__XPC_JSC_useSharedArrayBuffer=1')
        run_details = run(port, options, args, stderr)
        if run_details.exit_code != -1 and run_details.skipped_all_tests:
            return run_details.exit_code
        if run_details.exit_code != -1 and not run_details.initial_results.keyboard_interrupted:
            bot_printer = buildbot_results.BuildBotPrinter(
                stdout, options.debug_rwt_logging)
            bot_printer.print_results(run_details)

        return run_details.exit_code
    # We still need to handle KeyboardInterrupt, at least for webkitpy unittest cases.
    except KeyboardInterrupt:
        return INTERRUPTED_EXIT_STATUS
    except BaseException as e:
        if isinstance(e, Exception):
            print('\n%s raised: %s' % (e.__class__.__name__, str(e)),
                  file=stderr)
            traceback.print_exc(file=stderr)
        return EXCEPTIONAL_EXIT_STATUS
Code example #28
 def _set_up_host_and_port(self):
     options = self._options
     if options.platform and 'test' in options.platform:
         # It is lame to import mocks into real code, but this allows us to use the test port in multi-process tests as well.
         from webkitpy.common.host_mock import MockHost
         host = MockHost()
     else:
         host = Host()
     self._port = host.port_factory.get(options.platform, options)
Code example #29
    def test_import_dir_with_no_tests_and_no_hg(self):
        # FIXME: Use MockHosts instead.
        host = Host()
        host.executive = MockExecutive2(exception=OSError())

        importer = TestImporter(host, DUMMY_SOURCE_DIR, DUMMY_REPO_DIR,
                                optparse.Values({"overwrite": False}))
        importer.source_directory = importer.path_from_webkit_root(
            "Tools", "Scripts", "webkitpy", "w3c")
        importer.destination_directory = tempfile.mkdtemp(prefix='csswg')

        oc = OutputCapture()
        oc.capture_output()
        try:
            importer.do_import()
        finally:
            oc.restore_output()
            shutil.rmtree(importer.destination_directory, ignore_errors=True)
Code example #30
 def __init__(self, perf_tests_dir, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None):
     self._perf_tests_dir = perf_tests_dir
     self._buildbot_output = buildbot_output
     self._options, self._args = self._parse_args(args)
     self._host = Host()
     self._host._initialize_scm()
     self._port = self._host.port_factory.get(self._options.platform, self._options)
     self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
     self._webkit_base_dir_len = len(self._port.webkit_base())
Code example #31
 def __init__(self,
              perf_tests_dir,
              regular_output=sys.stderr,
              buildbot_output=sys.stdout,
              args=None):
     self._perf_tests_dir = perf_tests_dir
     self._buildbot_output = buildbot_output
     self._options, self._args = self._parse_args(args)
     self._host = Host()
     self._host._initialize_scm()
     self._port = self._host.port_factory.get(self._options.platform,
                                              self._options)
     self._printer = printing.Printer(self._port,
                                      self._options,
                                      regular_output,
                                      buildbot_output,
                                      configure_logging=False)
     self._webkit_base_dir_len = len(self._port.webkit_base())
Code example #32
File: test_parser.py (Project: yeeway/node-jsc)
    def __init__(self, options, filename, host=Host()):
        self.options = options
        self.filename = filename
        self.host = host
        self.filesystem = self.host.filesystem

        self.test_doc = None
        self.ref_doc = None
        self.load_file(filename)
Code example #33
    def _generate_testing_host(self, files={}):
        host = Host()
        expectation_files = files

        host.filesystem = MockFileSystem(dirs=['/mock-checkout/LayoutTests'])
        options = optparse.Values()
        setattr(options, 'layout_tests_dir', '/mock-checkout/LayoutTests')

        all_ports = [host.port_factory.get(name, options=options) for name in host.port_factory.all_port_names()]
        for port in all_ports:
            for path in port.expectations_files():
                if path not in expectation_files:
                    expectation_files[path] = '# Empty expectation file\n'

        expectation_files['/mock-checkout/LayoutTests/css1/test.html'] = 'Test'
        expectation_files['/mock-checkout/LayoutTests/css1/test-expected.txt'] = 'Test Expectation'
        host.filesystem = MockFileSystem(files=expectation_files)
        return host
Code example #34
    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)

        # The GTK+ and EFL ports only supports WebKit2, so they always use WKTR.
        if self._port.name().startswith("gtk") or self._port.name().startswith("efl"):
            self._options.webkit_test_runner = True

        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()
Code example #35
    def _new_or_modified_tests(self):
        touched_files = self._tool.scm().changed_files()
        touched_files.extend(self._tool.scm().untracked_files())
        if not touched_files:
            return None

        configuration = "Debug" if (self._options.build_style == "debug") else "Release"
        port = Host().port_factory.get(self._tool.deprecated_port().port_flag_name, optparse.Values({'configuration': configuration}))
        return LayoutTestFinder(port, optparse.Values({'skipped': 'always', 'skip_failing_tests': False, 'http': True})).find_touched_tests(touched_files)
Code example #36
        def run(self):
            options = self._options
            # FIXME: This should get the Host from the owner of this object
            # so this function can be properly mocked!
            host = Host()
            host._initialize_scm()
            port_obj = host.port_factory.get(self._platform_name, options)

            # The unix multiprocessing implementation clones the
            # log handler configuration into the child processes,
            # but the win implementation doesn't.
            configure_logging = (sys.platform == 'win32')

            # FIXME: this won't work if the calling process is logging
            # somewhere other than sys.stderr and sys.stdout, but I'm not sure
            # if this will be an issue in practice.
            printer = printing.Printer(port_obj, options, sys.stderr, sys.stdout, configure_logging)
            self._client.run(port_obj)
            printer.cleanup()
Code example #37
    def __init__(self):
        self._host = Host()
        self._filesystem = self._host.filesystem
        self._host.initialize_scm()
        self._webkit_root = self._host.scm().checkout_root

        # These settings might vary between WebKit and Blink
        self._css_property_file = self.path_from_webkit_root('Source', 'core', 'css', 'CSSPropertyNames.in')
        self._css_property_split_string = 'alias_for='

        self.prefixed_properties = self.read_webkit_prefixed_css_property_list()
Code example #38
def main(args):
    """Bootstrap function that sets up the object references we need and calls real_main()."""
    options, target_options = parse_options(args)

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if options.verbose:
        log_level = logging.DEBUG
        log_handler = DebugLogHandler()
    else:
        log_level = logging.INFO
        log_handler = NormalLogHandler()

    logger = logging.getLogger()
    logger.setLevel(log_level)
    logger.addHandler(log_handler)

    host = Host()
    host._initialize_scm()
    target_port_obj = host.port_factory.get(None, target_options)
    host_port_obj = get_host_port_object(host.port_factory, options)
    if not host_port_obj or not target_port_obj:
        return 1

    url_fetcher = urlfetcher.UrlFetcher(host_port_obj._filesystem)

    # We use the default zip factory method.
    zip_factory = None

    # FIXME: SCM module doesn't handle paths that aren't relative to the checkout_root consistently.
    host_port_obj._filesystem.chdir(host.scm().checkout_root)

    ret_code = real_main(options, target_options, host_port_obj, target_port_obj, url_fetcher, zip_factory, host.scm())
    if not ret_code and log_handler.num_failures:
        ret_code = 1
    print ""
    if ret_code:
        print "Rebaselining failed."
    else:
        print "Rebaselining succeeded."
    return ret_code
Code example #39
    def main(self):
        args = sys.argv[1:]

        host = Host()
        host.initialize_scm()

        stderr = self._engage_awesome_stderr_hacks()

        # Checking for the verbose flag before calling check_webkit_style_parser()
        # lets us enable verbose logging earlier.
        is_verbose = "-v" in args or "--verbose" in args

        checker.configure_logging(stream=stderr, is_verbose=is_verbose)
        _log.debug("Verbose logging enabled.")

        parser = checker.check_webkit_style_parser()
        (paths, options) = parser.parse(args)

        configuration = checker.check_webkit_style_configuration(options)

        paths = change_directory(host.filesystem, checkout_root=host.scm().checkout_root, paths=paths)

        style_processor = StyleProcessor(configuration)
        file_reader = TextFileReader(host.filesystem, style_processor)

        if paths and not options.diff_files:
            file_reader.process_paths(paths)
        else:
            changed_files = paths if options.diff_files else None
            patch = host.scm().create_patch(options.git_commit, changed_files=changed_files)
            patch_checker = PatchReader(file_reader)
            patch_checker.check(patch)

        error_count = style_processor.error_count
        file_count = file_reader.file_count
        delete_only_file_count = file_reader.delete_only_file_count

        _log.info("Total errors found: %d in %d files" % (error_count, file_count))
        # We fail when style errors are found or there are no checked files.
        return error_count > 0
Code example #40
 def __init__(self, args=None, port=None):
     self._options, self._args = PerfTestsRunner._parse_args(args)
     if port:
         self._port = port
         self._host = self._port.host
     else:
         self._host = Host()
         self._port = self._host.port_factory.get(self._options.platform, self._options)
     self._host.initialize_scm()
     self._webkit_base_dir_len = len(self._port.webkit_base())
     self._base_path = self._port.perf_tests_dir()
     self._results = {}
     self._timestamp = time.time()
Code example #41
def get_test_baselines(test_file, test_config):
    # FIXME: This seems like a hack. This only seems used to access the Port.expected_baselines logic.
    class AllPlatformsPort(Port):
        def __init__(self, host):
            super(AllPlatformsPort, self).__init__(host, 'mac')
            self._platforms_by_directory = dict([(self._webkit_baseline_path(p), p) for p in test_config.platforms])

        def baseline_search_path(self):
            return self._platforms_by_directory.keys()

        def platform_from_directory(self, directory):
            return self._platforms_by_directory[directory]

    test_path = test_config.filesystem.join(test_config.layout_tests_directory, test_file)

    # FIXME: This should get the Host from the test_config to be mockable!
    host = Host()
    host.initialize_scm()
    host.filesystem = test_config.filesystem
    all_platforms_port = AllPlatformsPort(host)

    all_test_baselines = {}
    for baseline_extension in ('.txt', '.checksum', '.png'):
        test_baselines = test_config.test_port.expected_baselines(test_file, baseline_extension)
        baselines = all_platforms_port.expected_baselines(test_file, baseline_extension, all_baselines=True)
        for platform_directory, expected_filename in baselines:
            if not platform_directory:
                continue
            if platform_directory == test_config.layout_tests_directory:
                platform = 'base'
            else:
                platform = all_platforms_port.platform_from_directory(platform_directory)
            platform_baselines = all_test_baselines.setdefault(platform, {})
            was_used_for_test = (platform_directory, expected_filename) in test_baselines
            platform_baselines[baseline_extension] = was_used_for_test

    return all_test_baselines
Code example #42
File: perftestsrunner.py (Project: sohocoke/webkit)
 def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
     self._buildbot_output = buildbot_output
     self._options, self._args = PerfTestsRunner._parse_args(args)
     if port:
         self._port = port
         self._host = self._port.host
     else:
         self._host = Host()
         self._port = self._host.port_factory.get(self._options.platform, self._options)
     self._host._initialize_scm()
     self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
     self._webkit_base_dir_len = len(self._port.webkit_base())
     self._base_path = self._port.perf_tests_dir()
     self._results = {}
     self._timestamp = time.time()
Code example #43
File: perftestsrunner.py (Project: ruizhang331/WebKit)
class PerfTestsRunner(object):
    _perf_tests_base_dir = 'PerformanceTests'
    _test_directories_for_chromium_style_tests = ['inspector']

    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None):
        self._buildbot_output = buildbot_output
        self._options, self._args = self._parse_args(args)
        self._host = Host()
        self._host._initialize_scm()
        self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._host.filesystem.join(self._port.webkit_base(), self._perf_tests_base_dir)

    def _parse_args(self, args=None):
        print_options = printing.print_options()

        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option("--platform",
                                 help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--build-directory",
                                 help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=30000,
                                 help="Set the timeout for each test"),
            ]

        option_list = (perf_option_list + print_options)
        return optparse.OptionParser(option_list=option_list).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        def _is_test_file(filesystem, dirname, filename):
            return filename.endswith('.html')

        return find_files.find(self._host.filesystem, self._base_path, paths=self._args, file_filter=_is_test_file)

    def run(self):
        if self._options.help_printing:
            self._printer.help_printing()
            self._printer.cleanup()
            return 0

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return -1

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        try:
            tests = self._collect_tests()
            unexpected = self._run_tests_set(tests, self._port)
        finally:
            self._printer.cleanup()

        return unexpected

    def _print_status(self, tests, expected, unexpected):
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d didn't run)" % unexpected
        self._printer.write(status)

    def _run_tests_set(self, tests, port):
        result_count = len(tests)
        expected = 0
        unexpected = 0
        driver_need_restart = False
        driver = None

        for test in tests:
            if driver_need_restart:
                _log.debug("%s killing driver" % test)
                driver.stop()
                driver = None
            if not driver:
                driver = port.create_driver(worker_number=1)

            relative_test_path = self._host.filesystem.relpath(test, self._base_path)
            self._printer.write('Running %s (%d of %d)' % (relative_test_path, expected + unexpected + 1, len(tests)))

            is_chromium_style = self._host.filesystem.split(relative_test_path)[0] in self._test_directories_for_chromium_style_tests
            test_failed, driver_need_restart = self._run_single_test(test, driver, is_chromium_style)
            if test_failed:
                unexpected = unexpected + 1
            else:
                expected = expected + 1

            self._printer.write('')

        if driver:
            driver.stop()

        return unexpected

    _inspector_result_regex = re.compile('^RESULT .*$')

    def _process_chromium_style_test_result(self, test, output):
        test_failed = False
        got_a_result = False
        for line in re.split('\n', output.text):
            if self._inspector_result_regex.match(line):
                self._buildbot_output.write("%s\n" % line)
                got_a_result = True
            elif not len(line) == 0:
                test_failed = True
                self._printer.write("%s" % line)
        return test_failed or not got_a_result

    _lines_to_ignore_in_parser_result = [
        re.compile(r'^Running \d+ times$'),
        re.compile(r'^Ignoring warm-up '),
        re.compile(r'^\d+$'),
    ]

    def _should_ignore_line_in_parser_test_result(self, line):
        if not line:
            return True
        for regex in self._lines_to_ignore_in_parser_result:
            if regex.match(line):
                return True
        return False

    def _process_parser_test_result(self, test, output):
        got_a_result = False
        test_failed = False
        filesystem = self._host.filesystem
        category, test_name = filesystem.split(filesystem.relpath(test, self._base_path))
        test_name = filesystem.splitext(test_name)[0]
        results = {}
        keys = ['avg', 'median', 'stdev', 'min', 'max']
        score_regex = re.compile(r'^(' + r'|'.join(keys) + r')\s+([0-9\.]+)')
        for line in re.split('\n', output.text):
            score = score_regex.match(line)
            if score:
                results[score.group(1)] = score.group(2)
                continue

            if not self._should_ignore_line_in_parser_test_result(line):
                test_failed = True
                self._printer.write("%s" % line)

        if test_failed or set(keys) != set(results.keys()):
            return True
        self._buildbot_output.write('RESULT %s: %s= %s ms\n' % (category, test_name, results['avg']))
        self._buildbot_output.write(', '.join(['%s= %s ms' % (key, results[key]) for key in keys[1:]]) + '\n')
        return False

    def _run_single_test(self, test, driver, is_chromium_style):
        test_failed = False
        driver_need_restart = False
        output = driver.run_test(DriverInput(test, self._options.time_out_ms, None, False))

        if output.text == None:
            test_failed = True
        elif output.timeout:
            self._printer.write('timeout: %s' % test[self._webkit_base_dir_len + 1:])
            test_failed = True
            driver_need_restart = True
        elif output.crash:
            self._printer.write('crash: %s' % test[self._webkit_base_dir_len + 1:])
            driver_need_restart = True
            test_failed = True
        else:
            if is_chromium_style:
                test_failed = self._process_chromium_style_test_result(test, output)
            else:
                test_failed = self._process_parser_test_result(test, output)

        if len(output.error):
            self._printer.write('error:\n%s' % output.error)
            test_failed = True

        if test_failed:
            self._printer.write('FAILED')

        return test_failed, driver_need_restart
Code example #44
File: perftestsrunner.py (Project: sukwon0709/Artemis)
class PerfTestsRunner(object):
    _default_branch = "webkit-trunk"
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        perf_option_list = [
            optparse.make_option(
                "--debug",
                action="store_const",
                const="Debug",
                dest="configuration",
                help="Set the configuration to Debug",
            ),
            optparse.make_option(
                "--release",
                action="store_const",
                const="Release",
                dest="configuration",
                help="Set the configuration to Release",
            ),
            optparse.make_option("--platform", help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option(
                "--chromium",
                action="store_const",
                const="chromium",
                dest="platform",
                help="Alias for --platform=chromium",
            ),
            optparse.make_option(
                "--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2."),
            ),
            optparse.make_option("--build-number", help=("The build number of the builder running this script.")),
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default).",
            ),
            optparse.make_option(
                "--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)",
            ),
            optparse.make_option("--time-out-ms", default=600 * 1000, help="Set the timeout for each test"),
            optparse.make_option(
                "--pause-before-testing",
                dest="pause_before_testing",
                action="store_true",
                default=False,
                help="Pause before running the tests to let user attach a performance monitor.",
            ),
            optparse.make_option("--output-json-path", help="Filename of the JSON file that summaries the results"),
            optparse.make_option(
                "--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present",
            ),
            optparse.make_option(
                "--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present",
            ),
            optparse.make_option(
                "--webkit-test-runner",
                "-2",
                action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree.",
            ),
        ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in [".html", ".svg"]

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set([".svn", "resources"])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace("\\", "/")
            if self._port.skips_perf_test(relative_path):
                continue
            tests.append(PerfTestFactory.create_perf_test(relative_path, path))

        return tests

    def run(self):
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        tests = self._collect_tests()
        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            if (
                not self._generate_json(
                    self._timestamp,
                    options.output_json_path,
                    options.source_json_path,
                    branch,
                    options.platform,
                    options.builder_name,
                    build_number,
                )
                and not unexpected
            ):
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(
        self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number
    ):
        contents = {"timestamp": int(timestamp), "results": self._results}
        for (name, path) in self._port.repository_paths():
            contents[name + "-revision"] = self._host.scm().svn_revision(path)

        for key, value in {
            "branch": branch,
            "platform": platform,
            "builder-name": builder_name,
            "build-number": build_number,
        }.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        succeeded = False
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError, error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError, error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError, error:
                _log.error("Failed to merge JSON files: %s" % error)
Code example #45
File: main.py (Project: 335969568/Blink-1)
 def __init__(self, path):
     MultiCommandTool.__init__(self)
     Host.__init__(self)
     self._path = path
Code example #46
class PerfTestsRunner(object):
    _default_branch = "webkit-trunk"
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = "PerformanceTestsResults.json"

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)

        perf_option_list = [
            optparse.make_option(
                "--debug",
                action="store_const",
                const="Debug",
                dest="configuration",
                help="Set the configuration to Debug",
            ),
            optparse.make_option(
                "--release",
                action="store_const",
                const="Release",
                dest="configuration",
                help="Set the configuration to Release",
            ),
            optparse.make_option("--platform", help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option(
                "--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2."),
            ),
            optparse.make_option("--build-number", help=("The build number of the builder running this script.")),
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default).",
            ),
            optparse.make_option(
                "--no-build",
                dest="build",
                action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date.",
            ),
            optparse.make_option(
                "--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)",
            ),
            optparse.make_option("--time-out-ms", default=600 * 1000, help="Set the timeout for each test"),
            optparse.make_option(
                "--no-results",
                action="store_false",
                dest="generate_results",
                default=True,
                help="Do no generate results JSON and results page.",
            ),
            optparse.make_option(
                "--output-json-path",
                action="callback",
                callback=_expand_path,
                type="str",
                help="Path to generate a JSON file at; may contain previous results if it already exists.",
            ),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                help="Clears the content in the generated JSON file before adding the results.",
            ),
            optparse.make_option(
                "--slave-config-json-path",
                action="callback",
                callback=_expand_path,
                type="str",
                help="Only used on bots. Path to a slave configuration file.",
            ),
            optparse.make_option("--description", help="Add a description to the output JSON file if one is generated"),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help="Don't launch a browser with results after the tests are done",
            ),
            optparse.make_option(
                "--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present.",
            ),
            optparse.make_option(
                "--webkit-test-runner",
                "-2",
                action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree.",
            ),
            optparse.make_option(
                "--replay", dest="replay", action="store_true", default=False, help="Run replay tests."
            ),
            optparse.make_option(
                "--force",
                dest="use_skipped_list",
                action="store_false",
                default=True,
                help="Run all tests, including the ones in the Skipped list.",
            ),
            optparse.make_option("--profile", action="store_true", help="Output per-test profile information."),
            optparse.make_option(
                "--profiler", action="store", help="Output per-test profile information, using the specified profiler."
            ),
            optparse.make_option(
                "--additional-drt-flag",
                action="append",
                default=[],
                help="Additional command line flag to pass to DumpRenderTree "
                "Specify multiple times to add multiple flags.",
            ),
            optparse.make_option("--driver-name", type="string", help="Alternative DumpRenderTree binary to use"),
            optparse.make_option(
                "--repeat", default=1, type="int", help="Specify number of times to run test set (default: 1)."
            ),
            optparse.make_option(
                "--test-runner-count",
                default=DEFAULT_TEST_RUNNER_COUNT,
                type="int",
                help="Specify number of times to invoke test runner for each performance test.",
            ),
        ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        test_extensions = [".html", ".svg"]
        if self._options.replay:
            test_extensions.append(".replay")

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            if filesystem.exists(filesystem.join(self._base_path, arg)):
                paths.append(arg)
            else:
                relpath = filesystem.relpath(arg, self._base_path)
                if filesystem.exists(filesystem.join(self._base_path, relpath)):
                    paths.append(filesystem.normpath(relpath))
                else:
                    _log.warn("Path was not found:" + arg)

        skipped_directories = set([".svn", "resources"])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = filesystem.relpath(path, self._base_path).replace("\\", "/")
            if (
                self._options.use_skipped_list
                and self._port.skips_perf_test(relative_path)
                and filesystem.normpath(relative_path) not in paths
            ):
                continue
            test = PerfTestFactory.create_perf_test(
                self._port, relative_path, path, test_runner_count=self._options.test_runner_count
            )
            tests.append(test)

        return tests

    def _start_http_servers(self):
        self._port.acquire_http_lock()
        self._port.start_http_server(number_of_servers=2)

    def _stop_http_servers(self):
        self._port.stop_http_server()
        self._port.release_http_lock()

    def run(self):
        needs_http = self._port.requires_http_server()

        if not self._port.check_build(needs_http=needs_http):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        run_count = 0
        repeat = self._options.repeat
        while run_count < repeat:
            run_count += 1

            tests = self._collect_tests()
            runs = " (Run %d of %d)" % (run_count, repeat) if repeat > 1 else ""
            _log.info("Running %d tests%s" % (len(tests), runs))

            for test in tests:
                if not test.prepare(self._options.time_out_ms):
                    return self.EXIT_CODE_BAD_PREPARATION

            try:
                if needs_http:
                    self._start_http_servers()
                unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))

            finally:
                if needs_http:
                    self._stop_http_servers()

            if self._options.generate_results and not self._options.profile:
                exit_code = self._generate_results()
                if exit_code:
                    return exit_code

        if self._options.generate_results and not self._options.profile:
            test_results_server = self._options.test_results_server
            if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
                return self.EXIT_CODE_FAILED_UPLOADING

            if self._options.show_results:
                self._port.show_results_html_file(self._results_page_path())

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _results_page_path(self):
        return self._host.filesystem.splitext(self._output_json_path())[0] + ".html"

    def _generate_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(
            self._timestamp, options.description, options.platform, options.builder_name, options.build_number
        )

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        filesystem = self._host.filesystem
        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        template_path = filesystem.join(self._port.perf_tests_dir(), "resources/results-template.html")
        template = filesystem.read_text_file(template_path)

        absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
        results_page = template.replace("%AbsolutePathToWebKitTrunk%", absolute_path_to_trunk)
        results_page = results_page.replace("%PeformanceTestsResultsJSON%", json_output)

        filesystem.write_text_file(self._results_page_path(), results_page)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {"revision": revision, "timestamp": scm.timestamp_of_revision(path, revision)}

        meta_info = {
            "description": description,
            "buildTime": self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            "platform": platform,
            "revisions": revisions,
            "builderName": builder_name,
            "buildNumber": int(build_number) if build_number else None,
        }

        contents = {"tests": {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            for metric_name, iteration_values in metrics.iteritems():
                if not isinstance(iteration_values, list):  # We can't report results without individual measurements.
                    continue

                tests = contents["tests"]
                path = test.test_name_without_file_extension().split("/")
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = view_source_url(
                        "PerformanceTests/" + (test.test_name() if is_last_token else "/".join(path[0 : i + 1]))
                    )
                    tests.setdefault(path[i], {"url": url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        current_test.setdefault("metrics", {})
                        assert metric_name not in current_test["metrics"]
                        current_test["metrics"][metric_name] = {"current": iteration_values}
                    else:
                        current_test.setdefault("tests", {})
                        tests = current_test["tests"]

        return contents

    @staticmethod
    def _datetime_in_ES5_compatible_iso_format(datetime):
        return datetime.strftime("%Y-%m-%dT%H:%M:%S.%f")

    def _merge_slave_config_json(self, slave_config_json_path, contents):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            for key in slave_config:
                contents["builder" + key.capitalize()] = slave_config[key]
            return contents
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None
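As a usage note for the class above: it is constructed directly from command-line arguments and driven entirely through run(). A minimal, hypothetical entry point, assuming PerfTestsRunner and its webkitpy dependencies are importable as in these listings, might look like this:

import sys


def main(argv):
    # Hypothetical driver script, not part of the original listing.
    # run() returns the number of unexpected results, or one of the negative
    # EXIT_CODE_* constants defined on the class if setup or uploading fails.
    runner = PerfTestsRunner(args=argv)
    return runner.run()


if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))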
Code example #47
0
        test_name = self._port.uri_to_test_name(test_input.uri)

        actual_text = port.expected_text(test_name)
        actual_image = ''
        actual_checksum = ''
        if self._options.pixel_tests and test_input.checksum:
            actual_checksum = port.expected_checksum(test_name)
            if actual_checksum != test_input.checksum:
                actual_image = port.expected_image(test_name)

        self._stdout.write("#URL:%s\n" % test_input.uri)
        if self._options.pixel_tests and test_input.checksum:
            self._stdout.write("#MD5:%s\n" % actual_checksum)
            self._host.filesystem.write_binary_file(self._options.pixel_path, actual_image)
        self._stdout.write(actual_text)

        # FIXME: (See above FIXME as well). Chromium DRT appears to always
        # ensure the text output has a trailing newline. Mac DRT does not.
        if not actual_text.endswith('\n'):
            self._stdout.write('\n')
        self._stdout.write('#EOF\n')
        self._stdout.flush()


if __name__ == '__main__':
    # FIXME: Why is this using a real Host object instead of MockHost?
    host = Host()
    host._initialize_scm()
    sys.exit(main(sys.argv[1:], host, sys.stdin, sys.stdout, sys.stderr))
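For reference, the fragment above frames each response on stdout in the DumpRenderTree style: a #URL line, an optional #MD5 line when pixel results are requested, the text dump (with a trailing newline enforced), and a closing #EOF marker. A hypothetical response, with made-up values, would look like this:

# Hypothetical stdout produced by the fragment above for one pixel test;
# the URL, checksum and text dump are made up.
expected_stdout = (
    "#URL:file:///tmp/LayoutTests/fast/example.html\n"
    "#MD5:d41d8cd98f00b204e9800998ecf8427e\n"
    "PASS example text dump\n"
    "#EOF\n"
)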
Code example #48
0
class PerfTestsRunner(object):
    _perf_tests_base_dir = 'PerformanceTests'
    _result_regex = re.compile('^RESULT .*$')

    def __init__(self, perf_tests_dir, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None):
        self._perf_tests_dir = perf_tests_dir
        self._buildbot_output = buildbot_output
        self._options, self._args = self._parse_args(args)
        self._host = Host()
        self._host._initialize_scm()
        self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
        self._webkit_base_dir_len = len(self._port.webkit_base())

    def _parse_args(self, args=None):
        print_options = printing.print_options()

        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option("--platform",
                                 help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--build-directory",
                                 help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=30000,
                                 help="Set the timeout for each test"),
            ]

        option_list = (perf_option_list + print_options)
        return optparse.OptionParser(option_list=option_list).parse_args(args)

    def _collect_tests(self, webkit_base, filesystem=None):
        """Return the list of tests found."""

        def _is_test_file(filesystem, dirname, filename):
            return filename.endswith('.html')

        filesystem = filesystem or self._host.filesystem
        base_dir = filesystem.join(webkit_base, self._perf_tests_base_dir, self._perf_tests_dir)
        return find_files.find(filesystem, base_dir, paths=self._args, file_filter=_is_test_file)

    def run(self):
        if self._options.help_printing:
            self._printer.help_printing()
            self._printer.cleanup()
            return 0

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return -1

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        try:
            tests = self._collect_tests(self._port.webkit_base())
            unexpected = self._run_tests_set(tests, self._port)
        finally:
            self._printer.cleanup()

        return unexpected

    def _run_tests_set(self, tests, port):
        result_count = len(tests)
        expected = 0
        unexpected = 0
        self._printer.print_one_line_summary(result_count, 0, 0)
        driver_need_restart = False
        driver = None

        for test in tests:
            if driver_need_restart:
                _log.debug("%s killing driver" % test)
                driver.stop()
                driver = None
            if not driver:
                driver = port.create_driver(worker_number=1)

            test_failed, driver_need_restart = self._run_single_test(test, driver)
            if test_failed:
                unexpected = unexpected + 1
            else:
                expected = expected + 1

            self._printer.print_one_line_summary(result_count, expected, unexpected)

        if driver:
            driver.stop()

        return unexpected

    def _run_single_test(self, test, driver):
        test_failed = False
        driver_need_restart = False
        output = driver.run_test(DriverInput(test, self._options.time_out_ms, None, False))

        if output.text is None:
            test_failed = True
        elif output.timeout:
            self._printer.write('timeout: %s' % test[self._webkit_base_dir_len + 1:])
            test_failed = True
            driver_need_restart = True
        elif output.crash:
            self._printer.write('crash: %s' % test[self._webkit_base_dir_len + 1:])
            driver_need_restart = True
            test_failed = True
        else:
            got_a_result = False
            for line in re.split('\n', output.text):
                if self._result_regex.match(line):
                    self._buildbot_output.write("%s\n" % line)
                    got_a_result = True
                elif line:
                    test_failed = True
                    self._printer.write("%s" % line)
            test_failed = test_failed or not got_a_result

        if output.error:
            self._printer.write('error:\n%s' % output.error)
            test_failed = True

        return test_failed, driver_need_restart
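_run_single_test above counts a test as passing only when at least one output line matches _result_regex and every other line is empty. A hypothetical output line of the following shape satisfies the check; the exact format is defined by the individual performance tests, not by this runner:

import re

_result_regex = re.compile('^RESULT .*$')

# Hypothetical performance-test output line; only the leading "RESULT "
# token matters to the regex above.
sample_line = "RESULT network-load: time= 123.4 ms"
assert _result_regex.match(sample_line) is not None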
Code example #49
0
File: main.py Project: 0x4d52/JavaScriptCore-X
    def __init__(self, path):
        MultiCommandTool.__init__(self)
        Host.__init__(self)

        self._path = path
        self.wakeup_event = threading.Event()
Code example #50
0
    def __init__(self):
        self._host = Host()
        self._filesystem = self._host.filesystem
        self._host.initialize_scm()
        self._webkit_root = self._host.scm().checkout_root
        self.prefixed_properties = self.read_webkit_prefixed_css_property_list()
Code example #51
0
    def fake_dir_path(self, dirname):
        filesystem = Host().filesystem
        webkit_root = WebKitFinder(filesystem).webkit_base()
        return filesystem.abspath(filesystem.join(webkit_root, "LayoutTests", "css", dirname))
Code example #52
0
class PerfTestsRunner(object):
    _default_branch = "webkit-trunk"
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = "PerformanceTestsResults.json"

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)

        perf_option_list = [
            optparse.make_option(
                "--debug",
                action="store_const",
                const="Debug",
                dest="configuration",
                help="Set the configuration to Debug",
            ),
            optparse.make_option(
                "--release",
                action="store_const",
                const="Release",
                dest="configuration",
                help="Set the configuration to Release",
            ),
            optparse.make_option("--platform", help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option(
                "--chromium",
                action="store_const",
                const="chromium",
                dest="platform",
                help="Alias for --platform=chromium",
            ),
            optparse.make_option(
                "--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2."),
            ),
            optparse.make_option("--build-number", help=("The build number of the builder running this script.")),
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default).",
            ),
            optparse.make_option(
                "--no-build",
                dest="build",
                action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date.",
            ),
            optparse.make_option(
                "--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)",
            ),
            optparse.make_option("--time-out-ms", default=600 * 1000, help="Set the timeout for each test"),
            optparse.make_option(
                "--pause-before-testing",
                dest="pause_before_testing",
                action="store_true",
                default=False,
                help="Pause before running the tests to let user attach a performance monitor.",
            ),
            optparse.make_option(
                "--no-results",
                action="store_false",
                dest="generate_results",
                default=True,
                help="Do no generate results JSON and results page.",
            ),
            optparse.make_option(
                "--output-json-path",
                action="callback",
                callback=_expand_path,
                type="str",
                help="Path to generate a JSON file at; may contain previous results if it already exists.",
            ),
            optparse.make_option(
                "--reset-results",
                action="store_true",
                help="Clears the content in the generated JSON file before adding the results.",
            ),
            optparse.make_option(
                "--slave-config-json-path",
                action="callback",
                callback=_expand_path,
                type="str",
                help="Only used on bots. Path to a slave configuration file.",
            ),
            optparse.make_option("--description", help="Add a description to the output JSON file if one is generated"),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help="Don't launch a browser with results after the tests are done",
            ),
            optparse.make_option(
                "--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present.",
            ),
            optparse.make_option(
                "--webkit-test-runner",
                "-2",
                action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree.",
            ),
            optparse.make_option(
                "--replay", dest="replay", action="store_true", default=False, help="Run replay tests."
            ),
            optparse.make_option(
                "--force",
                dest="skipped",
                action="store_true",
                default=False,
                help="Run all tests, including the ones in the Skipped list.",
            ),
        ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = [".html", ".svg"]
        if self._options.replay:
            test_extensions.append(".replay")

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set([".svn", "resources"])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace("\\", "/")
            if self._port.skips_perf_test(relative_path) and not self._options.skipped:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def run(self):
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self.EXIT_CODE_BAD_PREPARATION

        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
        if self._options.generate_results:
            exit_code = self._generate_and_show_results()
            if exit_code:
                return exit_code

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _generate_and_show_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(
            self._timestamp, options.description, options.platform, options.builder_name, options.build_number
        )

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        results_page_path = self._host.filesystem.splitext(output_json_path)[0] + ".html"
        self._generate_output_files(output_json_path, results_page_path, output)

        if options.test_results_server:
            if not self._upload_json(options.test_results_server, output_json_path):
                return self.EXIT_CODE_FAILED_UPLOADING

        if options.show_results:
            self._port.show_results_html_file(results_page_path)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        contents = {"results": self._results}
        if description:
            contents["description"] = description
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            contents[name + "-revision"] = scm.svn_revision(path)

        # FIXME: Add --branch or auto-detect the branch we're in
        for key, value in {
            "timestamp": int(timestamp),
            "branch": self._default_branch,
            "platform": platform,
            "builder-name": builder_name,
            "build-number": int(build_number) if build_number else None,
        }.items():
            if value:
                contents[key] = value

        return contents

    def _merge_slave_config_json(self, slave_config_json_path, output):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            return dict(slave_config.items() + output.items())
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None
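Note that _merge_slave_config_json above uses the Python 2 idiom dict(a.items() + b.items()), in which the right-hand operand wins on key collisions, so values already present in output take precedence over the slave configuration. A small illustration with made-up values:

# Illustration of the merge in _merge_slave_config_json above; the slave
# configuration keys and values are made up.
slave_config = {"gpu": "example-gpu", "platform": "bot-default"}
output = {"platform": "chromium-mac", "results": {}}

merged = dict(slave_config.items() + output.items())
assert merged == {"gpu": "example-gpu", "platform": "chromium-mac", "results": {}}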
Code example #53
0
File: perftestsrunner.py Project: sohocoke/webkit
class PerfTestsRunner(object):
    _perf_tests_base_dir = 'PerformanceTests'
    _test_directories_for_chromium_style_tests = ['inspector']
    _default_branch = 'webkit-trunk'
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3

    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
        self._buildbot_output = buildbot_output
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        print_options = printing.print_options()

        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summaries the results"),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present"),
            ]

        option_list = (perf_option_list + print_options)
        return optparse.OptionParser(option_list=option_list).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        def _is_test_file(filesystem, dirname, filename):
            return filename.endswith('.html')

        skipped_directories = set(['.svn', 'resources'])
        tests = find_files.find(self._host.filesystem, self._base_path, self._args, skipped_directories, _is_test_file)
        return [test for test in tests if not self._port.skips_perf_test(self._port.relative_perf_test_filename(test))]

    def run(self):
        if self._options.help_printing:
            self._printer.help_printing()
            self._printer.cleanup()
            return 0

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        try:
            tests = self._collect_tests()
            unexpected = self._run_tests_set(sorted(list(tests)), self._port)
        finally:
            self._printer.cleanup()

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                branch, options.platform, options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
        contents = {'timestamp': int(timestamp), 'results': self._results}
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        succeeded = False
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError, error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError, error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError, error:
                _log.error("Failed to merge JSON files: %s" % error)
Code example #54
0
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)

        # The GTK+ and EFL ports only support WebKit2, so they always use WKTR.
        if self._port.name().startswith("gtk") or self._port.name().startswith("efl"):
            self._options.webkit_test_runner = True

        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                help="Do no generate results JSON and results page."),
            optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
                help="Path to generate a JSON file at; may contain previous results if it already exists."),
            optparse.make_option("--reset-results", action="store_true",
                help="Clears the content in the generated JSON file before adding the results."),
            optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
                help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option("--description",
                help="Add a description to the output JSON file if one is generated"),
            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
                help="Don't launch a browser with results after the tests are done"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--dump-render-tree", "-1", action="store_false", default=True, dest="webkit_test_runner",
                help="Use DumpRenderTree rather than WebKitTestRunner."),
            optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
                help="Run all tests, including the ones in the Skipped list."),
            optparse.make_option("--profile", action="store_true",
                help="Output per-test profile information."),
            optparse.make_option("--profiler", action="store",
                help="Output per-test profile information, using the specified profiler."),
            optparse.make_option("--additional-drt-flag", action="append",
                default=[], help="Additional command line flag to pass to DumpRenderTree "
                     "Specify multiple times to add multiple flags."),
            optparse.make_option("--driver-name", type="string",
                help="Alternative DumpRenderTree binary to use"),
            optparse.make_option("--repeat", default=1, type="int",
                help="Specify number of times to run test set (default: 1)."),
            optparse.make_option("--test-runner-count", default=-1, type="int",
                help="Specify number of times to invoke test runner for each performance test."),
            optparse.make_option("--wrapper",
                help="wrapper command to insert before invocations of "
                 "DumpRenderTree or WebKitTestRunner; option is split on whitespace before "
                 "running. (Example: --wrapper='valgrind --smc-check=all')"),
            ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        test_extensions = ['.html', '.svg']

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            if filesystem.exists(filesystem.join(self._base_path, arg)):
                paths.append(arg)
            else:
                relpath = filesystem.relpath(arg, self._base_path)
                if filesystem.exists(filesystem.join(self._base_path, relpath)):
                    paths.append(filesystem.normpath(relpath))
                else:
                    _log.warning('Path was not found: ' + arg)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []

        test_runner_count = DEFAULT_TEST_RUNNER_COUNT
        if self._options.test_runner_count > 0:
            test_runner_count = self._options.test_runner_count
        elif self._options.profile:
            test_runner_count = 1

        for path in test_files:
            relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
            if self._options.use_skipped_list and self._port.skips_perf_test(relative_path) and filesystem.normpath(relative_path) not in paths:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path, test_runner_count=test_runner_count)
            tests.append(test)

        return tests

    def run(self):
        if "Debug" == self._port.get_option("configuration"):
            _log.warning("""****************************************************
* WARNING: run-perf-tests is running in DEBUG mode *
****************************************************""")

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._port.check_sys_deps(needs_http=False):
            _log.error("Failed to check system dependencies.")
            self._port.stop_helper()
            return self.EXIT_CODE_BAD_PREPARATION

        run_count = 0
        repeat = self._options.repeat
        while run_count < repeat:
            run_count += 1

            tests = self._collect_tests()
            runs = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
            _log.info("Running %d tests%s" % (len(tests), runs))

            for test in tests:
                if not test.prepare(self._options.time_out_ms):
                    return self.EXIT_CODE_BAD_PREPARATION

            unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))

            if self._options.generate_results and not self._options.profile:
                exit_code = self._generate_results()
                if exit_code:
                    return exit_code

        if self._options.generate_results and not self._options.profile:
            test_results_server = self._options.test_results_server
            if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
                return self.EXIT_CODE_FAILED_UPLOADING

            if self._options.show_results:
                self._port.show_results_html_file(self._results_page_path())

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _results_page_path(self):
        return self._host.filesystem.splitext(self._output_json_path())[0] + '.html'

    def _generate_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        filesystem = self._host.filesystem
        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
        template = filesystem.read_text_file(template_path)

        absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
        results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
        results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)

        filesystem.write_text_file(self._results_page_path(), results_page)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        contents = {'tests': {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for metric in self._results:
            tests = contents['tests']
            path = metric.path()
            for i in range(0, len(path)):
                is_last_token = i + 1 == len(path)
                url = view_source_url('PerformanceTests/' + '/'.join(path[0:i + 1]))
                test_name = path[i]

                # FIXME: This is a temporary workaround for the fact perf dashboard doesn't support renaming tests.
                if test_name == 'Speedometer':
                    test_name = 'DoYouEvenBench'

                tests.setdefault(test_name, {'url': url})
                current_test = tests[test_name]
                if is_last_token:
                    current_test['url'] = view_source_url('PerformanceTests/' + metric.test_file_name())
                    current_test.setdefault('metrics', {})
                    assert metric.name() not in current_test['metrics']
                    test_results = {'current': metric.grouped_iteration_values()}
                    if metric.aggregator():
                        test_results['aggregators'] = [metric.aggregator()]
                    current_test['metrics'][metric.name()] = test_results
                else:
                    current_test.setdefault('tests', {})
                    tests = current_test['tests']

        return contents

    @staticmethod
    def _datetime_in_ES5_compatible_iso_format(datetime):
        return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')

    def _merge_slave_config_json(self, slave_config_json_path, contents):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            for key in slave_config:
                contents['builder' + key.capitalize()] = slave_config[key]
            return contents
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None
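For orientation, _generate_results_dict above nests each component of a metric's path under successive 'tests' keys and attaches 'metrics' only at the leaf. For a single hypothetical 'Time' metric at Parser/html5-full-render, the returned dictionary has roughly the following shape; every value here is made up, and the real 'url' entries come from view_source_url():

# Hypothetical shape of the dictionary returned by _generate_results_dict
# above for one "Time" metric; all values are made up.
contents = {
    "buildTime": "2013-01-01T00:00:00.000000",
    "platform": "mac",
    "tests": {
        "Parser": {
            "url": "<view_source_url('PerformanceTests/Parser')>",
            "tests": {
                "html5-full-render": {
                    "url": "<view_source_url('PerformanceTests/Parser/html5-full-render.html')>",
                    "metrics": {
                        "Time": {"current": [[1440.0, 1460.0, 1510.0]]},
                    },
                },
            },
        },
    },
}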
Code example #55
0
File: perftestsrunner.py Project: mirror/chromium
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option('-t', '--target', dest='configuration',
                                 help='Specify the target build subdirectory under src/out/'),
            optparse.make_option("--platform",
                                 help="Specify port/platform being tested (e.g. mac)"),
            optparse.make_option("--chromium",
                                 action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--android",
                                 action="store_const", const='android', dest='platform', help='Alias for --platform=android'),
            optparse.make_option("--builder-name",
                                 help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                                 help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                                 help="Check to ensure the DumpRenderTree build is up to date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                                 help="Don't check to see if the DumpRenderTree build is up to date."),
            optparse.make_option("--build-directory",
                                 help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                                 help="Set the timeout for each test"),
            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                                 help="Do no generate results JSON and results page."),
            optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
                                 help="Path to generate a JSON file at; may contain previous results if it already exists."),
            optparse.make_option("--reset-results", action="store_true",
                                 help="Clears the content in the generated JSON file before adding the results."),
            optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
                                 help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option("--description",
                                 help="Add a description to the output JSON file if one is generated"),
            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
                                 help="Don't launch a browser with results after the tests are done"),
            optparse.make_option("--test-results-server",
                                 help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
                                 help="Run all tests, including the ones in the Skipped list."),
            optparse.make_option("--profile", action="store_true",
                                 help="Output per-test profile information."),
            optparse.make_option("--profiler", action="store",
                                 help="Output per-test profile information, using the specified profiler."),
            optparse.make_option("--additional-driver-flag", action="append",
                                 default=[], help="Additional command line flag to pass to DumpRenderTree "
                                 "Specify multiple times to add multiple flags."),
            optparse.make_option("--driver-name", type="string",
                                 help="Alternative DumpRenderTree binary to use"),
            optparse.make_option("--content-shell", action="store_true",
                                 help="Use Content Shell instead of DumpRenderTree"),
            optparse.make_option("--repeat", default=1, type="int",
                                 help="Specify number of times to run test set (default: 1)."),
            optparse.make_option("--test-runner-count", default=DEFAULT_TEST_RUNNER_COUNT, type="int",
                                 help="Specify number of times to invoke test runner for each performance test."),
        ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        test_extensions = ['.html', '.svg']

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            if filesystem.exists(filesystem.join(self._base_path, arg)):
                paths.append(arg)
            else:
                relpath = filesystem.relpath(arg, self._base_path)
                if filesystem.exists(filesystem.join(self._base_path, relpath)):
                    paths.append(filesystem.normpath(relpath))
                else:
                    _log.warning('Path was not found: ' + arg)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
            if self._options.use_skipped_list and self._port.skips_perf_test(
                    relative_path) and filesystem.normpath(relative_path) not in paths:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path,
                                                    test_runner_count=self._options.test_runner_count)
            tests.append(test)

        return tests

    def _start_http_servers(self):
        self._port.acquire_http_lock()
        self._port.start_http_server(number_of_servers=2)

    def _stop_http_servers(self):
        self._port.stop_http_server()
        self._port.release_http_lock()

    def run(self):
        needs_http = self._port.requires_http_server()

        class FakePrinter(object):

            def write_update(self, msg):
                print msg

            def write_throttled_update(self, msg):
                pass

        if self._port.check_build(needs_http=needs_http, printer=FakePrinter()):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        run_count = 0
        repeat = self._options.repeat
        while run_count < repeat:
            run_count += 1

            tests = self._collect_tests()
            runs = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
            _log.info("Running %d tests%s", len(tests), runs)

            try:
                if needs_http:
                    self._start_http_servers()
                unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))

            finally:
                if needs_http:
                    self._stop_http_servers()

            if self._options.generate_results and not self._options.profile:
                exit_code = self._generate_results()
                if exit_code:
                    return exit_code

        if self._options.generate_results and not self._options.profile:
            test_results_server = self._options.test_results_server
            if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
                return self.EXIT_CODE_FAILED_UPLOADING

            if self._options.show_results:
                self._port.show_results_html_file(self._results_page_path())

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _results_page_path(self):
        return self._host.filesystem.splitext(self._output_json_path())[0] + '.html'

    def _generate_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp, options.description,
                                             options.platform, options.builder_name, options.build_number)

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        filesystem = self._host.filesystem
        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
        template = filesystem.read_text_file(template_path)

        absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
        results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
        results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)

        filesystem.write_text_file(self._results_page_path(), results_page)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        revisions = {}
        path = self._port.repository_path()
        scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
        revision = str(scm.commit_position(path))
        revisions['chromium'] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        contents = {'tests': {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            for metric_name, iteration_values in metrics.iteritems():
                if not isinstance(iteration_values, list):  # We can't report results without individual measurements.
                    continue

                tests = contents['tests']
                path = test.test_name_without_file_extension().split('/')
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = self.view_source_url(
                        'PerformanceTests/' + (test.test_name() if is_last_token else '/'.join(path[0:i + 1])))
                    tests.setdefault(path[i], {'url': url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        current_test.setdefault('metrics', {})
                        assert metric_name not in current_test['metrics']
                        current_test['metrics'][metric_name] = {'current': iteration_values}
                    else:
                        current_test.setdefault('tests', {})
                        tests = current_test['tests']

        return contents

    @staticmethod
    def view_source_url(path_from_blink):
        return 'https://chromium.googlesource.com/chromium/src/+/master/third_party/WebKit/%s' % path_from_blink

    @staticmethod
    def _datetime_in_ES5_compatible_iso_format(datetime):
        return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')

    def _merge_slave_config_json(self, slave_config_json_path, contents):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s", slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            for key in slave_config:
                contents['builder' + key.capitalize()] = slave_config[key]
            return contents
        except Exception as error:
            _log.error("Failed to merge slave configuration JSON file %s: %s", slave_config_json_path, error)
        return None

    def _merge_outputs_if_needed(self, output_json_path, output):
        if self._options.reset_results or not self._host.filesystem.isfile(output_json_path):
            return [output]
        try:
            existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
            return existing_outputs + [output]
        except Exception as error:
            _log.error("Failed to merge output JSON file %s: %s", output_json_path, error)
        return None

    def _upload_json(self, test_results_server, json_path, host_path="/api/report", file_uploader=FileUploader):
        url = "https://%s%s" % (test_results_server, host_path)
        uploader = file_uploader(url, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception as error:
            _log.error("Failed to upload JSON file to %s in 120s: %s", url, error)
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            try:
                parsed_response = json.loads('\n'.join(response_body))
            except:
                _log.error("Uploaded JSON to %s but got a bad response:", url)
                for line in response_body:
                    _log.error(line)
                return False
            if parsed_response.get('status') != 'OK':
                _log.error("Uploaded JSON to %s but got an error:", url)
                _log.error(json.dumps(parsed_response, indent=4))
                return False

        _log.info("JSON file uploaded to %s.", url)
        return True

    def _run_tests_set(self, tests):
        failures = 0
        self._results = []

        for i, test in enumerate(tests):
            _log.info('Running %s (%d of %d)', test.test_name(), i + 1, len(tests))
            start_time = time.time()
            metrics = test.run(self._options.time_out_ms)
            if metrics:
                self._results.append((test, metrics))
            else:
                failures += 1
                _log.error('FAILED')

            _log.info('Finished: %f s', time.time() - start_time)
            _log.info('')

        return failures
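
A minimal driver sketch for the runner shown above, assuming it lives in the module that defines PerfTestsRunner and that the webkitpy dependencies it imports are available; the logging setup and argument handling here are illustrative rather than part of the original script.

# Illustrative entry point (assumption: PerfTestsRunner above is defined in this module).
import sys
import logging

def _sketch_main(argv):
    logging.basicConfig(level=logging.INFO)
    runner = PerfTestsRunner(args=argv)  # argv uses the option list shown above
    return runner.run()                  # returns a failure count or a negative exit code

if __name__ == '__main__':
    sys.exit(_sketch_main(sys.argv[1:]))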
Code example #56
0
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()

    @staticmethod
    def _parse_args(args=None):
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--chromium-android",
                action="store_const", const='chromium-android', dest='platform', help='Alias for --platform=chromium-android'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                help="Do no generate results JSON and results page."),
            optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
                help="Path to generate a JSON file at; may contain previous results if it already exists."),
            optparse.make_option("--reset-results", action="store_true",
                help="Clears the content in the generated JSON file before adding the results."),
            optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
                help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option("--description",
                help="Add a description to the output JSON file if one is generated"),
            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
                help="Don't launch a browser with results after the tests are done"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
                help="Run replay tests."),
            optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
                help="Run all tests, including the ones in the Skipped list."),
            optparse.make_option("--profile", action="store_true",
                help="Output per-test profile information."),
            optparse.make_option("--profiler", action="store",
                help="Output per-test profile information, using the specified profiler."),
            optparse.make_option("--additional-drt-flag", action="append",
                default=[], help="Additional command line flag to pass to DumpRenderTree "
                     "Specify multiple times to add multiple flags."),
            ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            if filesystem.exists(filesystem.join(self._base_path, arg)):
                paths.append(arg)
            else:
                relpath = filesystem.relpath(arg, self._base_path)
                if filesystem.exists(filesystem.join(self._base_path, relpath)):
                    paths.append(filesystem.normpath(relpath))
                else:
                    _log.warn('Path was not found: ' + arg)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
            if self._options.use_skipped_list and self._port.skips_perf_test(relative_path) and filesystem.normpath(relative_path) not in paths:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def _start_http_servers(self):
        self._port.acquire_http_lock()
        self._port.start_http_server(number_of_servers=2)

    def _stop_http_servers(self):
        self._port.stop_http_server()
        self._port.release_http_lock()

    def run(self):
        needs_http = self._port.requires_http_server()

        if not self._port.check_build(needs_http=needs_http):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self.EXIT_CODE_BAD_PREPARATION

        try:
            if needs_http:
                self._start_http_servers()
            unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)

        finally:
            if needs_http:
                self._stop_http_servers()

        if self._options.generate_results and not self._options.profile:
            exit_code = self._generate_and_show_results()
            if exit_code:
                return exit_code

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _generate_and_show_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'
        self._generate_output_files(output_json_path, results_page_path, output)

        if options.test_results_server:
            if options.test_results_server == 'webkit-perf.appspot.com':
                options.test_results_server = 'perf.webkit.org'

            if not self._upload_json(options.test_results_server, output_json_path):
                return self.EXIT_CODE_FAILED_UPLOADING

        if options.show_results:
            self._port.show_results_html_file(results_page_path)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        contents = {'tests': {}}
        if description:
            contents['description'] = description

        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': str(revision), 'timestamp': scm.timestamp_of_latest_commit(path, revision)}

        meta_info = {
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        for key, value in meta_info.items():
            if value:
                contents[key] = value

        # FIXME: Make this function shorter once we've transitioned to use perf.webkit.org.
        for metric_full_name, result in self._results.iteritems():
            if not isinstance(result, dict):  # We can't report results without individual measurements.
                continue

            assert metric_full_name.count(':') <= 1
            test_full_name, _, metric = metric_full_name.partition(':')
            if not metric:
                metric = {'fps': 'FrameRate', 'runs/s': 'Runs', 'ms': 'Time'}[result['unit']]

            tests = contents['tests']
            path = test_full_name.split('/')
            for i in range(0, len(path)):
                # FIXME: We shouldn't assume HTML extension.
                is_last_token = i + 1 == len(path)
                url = 'http://trac.webkit.org/browser/trunk/PerformanceTests/' + '/'.join(path[0:i + 1])
                if is_last_token:
                    url += '.html'

                tests.setdefault(path[i], {'url': url})
                current_test = tests[path[i]]
                if is_last_token:
                    current_test.setdefault('metrics', {})
                    assert metric not in current_test['metrics']
                    current_test['metrics'][metric] = {'current': result['values']}
                else:
                    current_test.setdefault('tests', {})
                    tests = current_test['tests']

        return contents

    @staticmethod
    def _datetime_in_ES5_compatible_iso_format(datetime):
        return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')

    def _merge_slave_config_json(self, slave_config_json_path, contents):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            for key in slave_config:
                contents['builder' + key.capitalize()] = slave_config[key]
            return contents
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None
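
The slave-config merge in _merge_slave_config_json above simply folds every key of the bot's JSON file into the output dictionary under a 'builder'-prefixed, capitalized name. A standalone sketch of that renaming, using hypothetical config contents (real files live only on the bots):

# Sketch of the key renaming done by _merge_slave_config_json above.
import json

slave_config_text = '{"name": "my-perf-bot", "id": 42}'  # hypothetical contents
contents = {'tests': {}}
for key, value in json.loads(slave_config_text).items():
    contents['builder' + key.capitalize()] = value
print(contents)  # now includes 'builderName': 'my-perf-bot' and 'builderId': 42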
Code example #57
0
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3
    _EXIT_CODE_BAD_PREPARATION = -4

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let user attach a performance monitor."),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summaries the results."),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present."),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
                help="Run replay tests."),
            optparse.make_option("--force", dest="skipped", action="store_true", default=False,
                help="Run all tests, including the ones in the Skipped list."),
            ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
            if self._port.skips_perf_test(relative_path) and not self._options.skipped:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def run(self):
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self._EXIT_CODE_BAD_PREPARATION

        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                branch, options.platform, options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
        contents = {'timestamp': int(timestamp), 'results': self._results}
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        succeeded = False
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError, error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError, error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError, error:
                _log.error("Failed to merge JSON files: %s" % error)
Code example #58
0
class W3CTestConverter(object):

    def __init__(self):
        self._host = Host()
        self._filesystem = self._host.filesystem
        self._host.initialize_scm()
        self._webkit_root = self._host.scm().checkout_root

        # These settings might vary between WebKit and Blink
        self._css_property_file = self.path_from_webkit_root('Source', 'core', 'css', 'CSSPropertyNames.in')
        self._css_property_split_string = 'alias_for='

        self.prefixed_properties = self.read_webkit_prefixed_css_property_list()

    def path_from_webkit_root(self, *comps):
        return self._filesystem.abspath(self._filesystem.join(self._webkit_root, *comps))

    def read_webkit_prefixed_css_property_list(self):
        prefixed_properties = []

        contents = self._filesystem.read_text_file(self._css_property_file)
        for line in contents.splitlines():
            # Find lines starting with the -webkit- prefix.
            match = re.match('-webkit-[\w|-]*', line)
            if match:
                # Ignore lines where both the prefixed and non-prefixed property
                # are supported - denoted by -webkit-some-property = some-property.
                fields = line.split(self._css_property_split_string)
                if len(fields) == 2 and fields[1].strip() in fields[0].strip():
                    continue
                prefixed_properties.append(match.group(0))

        return prefixed_properties

    def convert_for_webkit(self, new_path, filename):
        """ Converts a file's |contents| so it will function correctly in its |new_path| in Webkit.

        Returns the list of modified properties and the modified text if the file was modifed, None otherwise."""
        contents = self._filesystem.read_binary_file(filename)
        if filename.endswith('.css'):
            return self.convert_css(contents, filename)
        return self.convert_html(new_path, contents, filename)

    def convert_css(self, contents, filename):
        return self.add_webkit_prefix_to_unprefixed_properties(contents, filename)

    def convert_html(self, new_path, contents, filename):
        doc = BeautifulSoup(contents)
        did_modify_paths = self.convert_testharness_paths(doc, new_path, filename)
        converted_properties_and_content = self.convert_prefixed_properties(doc, filename)
        return converted_properties_and_content if (did_modify_paths or converted_properties_and_content[0]) else None

    def convert_testharness_paths(self, doc, new_path, filename):
        """ Update links to testharness.js in the BeautifulSoup |doc| to point to the copy in |new_path|.

        Returns whether the document was modified."""

        # Look for the W3C-style path to any testharness files - scripts (.js) or links (.css)
        pattern = re.compile('/resources/testharness')
        script_tags = doc.findAll(src=pattern)
        link_tags = doc.findAll(href=pattern)
        testharness_tags = script_tags + link_tags

        if not testharness_tags:
            return False

        resources_path = self.path_from_webkit_root('LayoutTests', 'resources')
        resources_relpath = self._filesystem.relpath(resources_path, new_path)

        for tag in testharness_tags:
            # FIXME: We need to handle img, audio, video tags also.
            attr = 'src'
            if tag.name != 'script':
                attr = 'href'

            if attr not in tag.attrMap:
                # FIXME: Figure out what to do w/ invalid tags. For now, we return False
                # and leave the document unmodified, which means that it'll probably fail to run.
                _log.error("Missing an attr in %s" % filename)
                return False

            old_path = tag[attr]
            new_tag = Tag(doc, tag.name, tag.attrs)
            new_tag[attr] = re.sub(pattern, resources_relpath + '/testharness', old_path)

            self.replace_tag(tag, new_tag)

        return True

    def convert_prefixed_properties(self, doc, filename):
        """ Searches a BeautifulSoup |doc| for any CSS properties requiring the -webkit- prefix and converts them.

        Returns the list of converted properties and the modified document as a string."""

        converted_properties = []

        # Look for inline and document styles.
        inline_styles = doc.findAll(style=re.compile('.*'))
        style_tags = doc.findAll('style')
        all_styles = inline_styles + style_tags

        for tag in all_styles:

            # Get the text whether in a style tag or style attribute.
            style_text = ''
            if tag.name == 'style':
                if not tag.contents:
                    continue
                style_text = tag.contents[0]
            else:
                style_text = tag['style']

            updated_style_text = self.add_webkit_prefix_to_unprefixed_properties(style_text, filename)

            # Rewrite tag only if changes were made.
            if updated_style_text[0]:
                converted_properties.extend(updated_style_text[0])

                new_tag = Tag(doc, tag.name, tag.attrs)
                new_tag.insert(0, updated_style_text[1])

                self.replace_tag(tag, new_tag)

        return (converted_properties, doc.prettify())

    def add_webkit_prefix_to_unprefixed_properties(self, text, filename):
        """ Searches |text| for instances of properties requiring the -webkit- prefix and adds the prefix to them.

        Returns the list of converted properties and the modified text."""

        converted_properties = []

        for prefixed_property in self.prefixed_properties:
            # FIXME: add in both the prefixed and unprefixed versions, rather than just replacing them?
            # That might allow the imported test to work in other browsers more easily.

            unprefixed_property = prefixed_property.replace('-webkit-', '')

            # Look for the various ways it might be in the CSS
            # Match the property preceded by either whitespace or a left curly brace
            # or at the beginning of the string (for inline style attribute)
            pattern = '([\s{]|^)' + unprefixed_property + '(\s+:|:)'
            if re.search(pattern, text):
                _log.info('converting %s -> %s' % (unprefixed_property, prefixed_property))
                converted_properties.append(prefixed_property)
                text = re.sub(pattern, prefixed_property + ':', text)

        # FIXME: Handle the JS versions of these properties and GetComputedStyle, too.
        return (converted_properties, text)

    def replace_tag(self, old_tag, new_tag):
        index = old_tag.parent.contents.index(old_tag)
        old_tag.parent.insert(index, new_tag)
        old_tag.extract()
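
A usage sketch for the prefix-rewriting helper above; the real constructor needs a WebKit checkout to read CSSPropertyNames.in, so this bypasses __init__ and stubs prefixed_properties with a hypothetical entry instead.

# Bypass __init__ (it requires a checkout) and stub the property list.
converter = W3CTestConverter.__new__(W3CTestConverter)
converter.prefixed_properties = ['-webkit-flex-wrap']  # hypothetical prefixed property
properties, text = converter.add_webkit_prefix_to_unprefixed_properties(
    'div { flex-wrap: wrap; }', 'example.css')
# properties == ['-webkit-flex-wrap']
# text == 'div {-webkit-flex-wrap: wrap; }'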
Code example #59
0
def main():
    options, args = parse_args()
    host = Host()
    host._initialize_scm()
    port = host.port_factory.get(options.platform, options)
    return run(port, options, args)