Example #1
def get_test_baselines(test_file, test_config):
    # FIXME: This seems like a hack. This only seems to be used to access the Port.expected_baselines logic.
    class AllPlatformsPort(WebKitPort):
        def __init__(self, host):
            WebKitPort.__init__(self, host, 'mac')
            self._platforms_by_directory = dict([
                (self._webkit_baseline_path(p), p)
                for p in test_config.platforms
            ])

        def baseline_search_path(self):
            return self._platforms_by_directory.keys()

        def platform_from_directory(self, directory):
            return self._platforms_by_directory[directory]

    test_path = test_config.filesystem.join(test_config.layout_tests_directory,
                                            test_file)

    # FIXME: This should get the Host from the test_config to be mockable!
    host = Host()
    host._initialize_scm()
    host.filesystem = test_config.filesystem
    all_platforms_port = AllPlatformsPort(host)

    all_test_baselines = {}
    for baseline_extension in ('.txt', '.checksum', '.png'):
        test_baselines = test_config.test_port.expected_baselines(
            test_file, baseline_extension)
        baselines = all_platforms_port.expected_baselines(test_file,
                                                          baseline_extension,
                                                          all_baselines=True)
        for platform_directory, expected_filename in baselines:
            if not platform_directory:
                continue
            if platform_directory == test_config.layout_tests_directory:
                platform = 'base'
            else:
                platform = all_platforms_port.platform_from_directory(
                    platform_directory)
            platform_baselines = all_test_baselines.setdefault(platform, {})
            was_used_for_test = (platform_directory,
                                 expected_filename) in test_baselines
            platform_baselines[baseline_extension] = was_used_for_test

    return all_test_baselines
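The FIXME near the top of this example says get_test_baselines() should obtain its Host from the test_config so the function can be mocked. The following is a minimal, self-contained sketch of that dependency-injection idea; the FakeHost class and the optional host parameter are illustrative assumptions, not part of the webkitpy code shown above.

class FakeHost(object):
    """Stand-in for webkitpy's Host; provides only what this sketch touches."""
    def __init__(self, filesystem=None):
        self.filesystem = filesystem

    def _initialize_scm(self):
        pass  # a real Host would locate the SCM checkout here


def get_test_baselines(test_file, test_config, host=None):
    # Prefer an injected host (a unit test can pass a fake one); otherwise
    # construct one here, as the original code does with a real Host().
    host = host or FakeHost()
    host._initialize_scm()
    host.filesystem = test_config.filesystem
    # ... the rest of the function (AllPlatformsPort, baseline collection)
    # would proceed unchanged; a stub result stands in for it here.
    return {}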
Example #2
    def main(self):
        args = sys.argv[1:]

        host = Host()
        host._initialize_scm()

        stderr = self._engage_awesome_stderr_hacks()

        # Checking for the verbose flag before calling check_webkit_style_parser()
        # lets us enable verbose logging earlier.
        is_verbose = "-v" in args or "--verbose" in args

        checker.configure_logging(stream=stderr, is_verbose=is_verbose)
        _log.debug("Verbose logging enabled.")

        parser = checker.check_webkit_style_parser()
        (paths, options) = parser.parse(args)

        configuration = checker.check_webkit_style_configuration(options)

        paths = change_directory(host.filesystem,
                                 checkout_root=host.scm().checkout_root,
                                 paths=paths)

        style_processor = StyleProcessor(configuration)
        file_reader = TextFileReader(host.filesystem, style_processor)

        if paths and not options.diff_files:
            file_reader.process_paths(paths)
        else:
            changed_files = paths if options.diff_files else None
            patch = host.scm().create_patch(options.git_commit,
                                            changed_files=changed_files)
            patch_checker = PatchReader(file_reader)
            patch_checker.check(patch)

        error_count = style_processor.error_count
        file_count = file_reader.file_count
        delete_only_file_count = file_reader.delete_only_file_count

        _log.info("Total errors found: %d in %d files" %
                  (error_count, file_count))
        # We fail when style errors are found, or when nothing was checked at all
        # (no checked files and no delete-only files).
        return error_count > 0 or (file_count == 0
                                   and delete_only_file_count == 0)
Example #3
        def run(self):
            options = self._options
            # FIXME: This should get the Host from the owner of this object
            # so this function can be properly mocked!
            host = Host()
            host._initialize_scm()
            port_obj = host.port_factory.get(self._platform_name, options)

            # The unix multiprocessing implementation clones the
            # log handler configuration into the child processes,
            # but the win implementation doesn't.
            configure_logging = (sys.platform == 'win32')

            # FIXME: this won't work if the calling process is logging
            # somewhere other than sys.stderr and sys.stdout, but I'm not sure
            # if this will be an issue in practice.
            printer = printing.Printer(port_obj, options, sys.stderr, sys.stdout, configure_logging)
            self._client.run(port_obj)
            printer.cleanup()
Example #4
def main(args):
    """Bootstrap function that sets up the object references we need and calls real_main()."""
    options, target_options = parse_options(args)

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if options.verbose:
        log_level = logging.DEBUG
        log_handler = DebugLogHandler()
    else:
        log_level = logging.INFO
        log_handler = NormalLogHandler()

    logger = logging.getLogger()
    logger.setLevel(log_level)
    logger.addHandler(log_handler)

    host = Host()
    host._initialize_scm()
    target_port_obj = host.port_factory.get(None, target_options)
    host_port_obj = get_host_port_object(host.port_factory, options)
    if not host_port_obj or not target_port_obj:
        return 1

    url_fetcher = urlfetcher.UrlFetcher(host.filesystem)

    # We use the default zip factory method.
    zip_factory = None

    # FIXME: SCM module doesn't handle paths that aren't relative to the checkout_root consistently.
    host_port_obj._filesystem.chdir(host.scm().checkout_root)

    ret_code = real_main(host, options, target_options, host_port_obj,
                         target_port_obj, url_fetcher, zip_factory)
    if not ret_code and log_handler.num_failures:
        ret_code = 1
    print ''
    if ret_code:
        print 'Rebaselining failed.'
    else:
        print 'Rebaselining succeeded.'
    return ret_code
Example #5
def main(args):
    """Bootstrap function that sets up the object references we need and calls real_main()."""
    options, target_options = parse_options(args)

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if options.verbose:
        log_level = logging.DEBUG
        log_handler = DebugLogHandler()
    else:
        log_level = logging.INFO
        log_handler = NormalLogHandler()

    logger = logging.getLogger()
    logger.setLevel(log_level)
    logger.addHandler(log_handler)

    host = Host()
    host._initialize_scm()
    target_port_obj = host.port_factory.get(None, target_options)
    host_port_obj = get_host_port_object(host.port_factory, options)
    if not host_port_obj or not target_port_obj:
        return 1

    url_fetcher = urlfetcher.UrlFetcher(host_port_obj._filesystem)

    # We use the default zip factory method.
    zip_factory = None

    # FIXME: SCM module doesn't handle paths that aren't relative to the checkout_root consistently.
    host_port_obj._filesystem.chdir(host.scm().checkout_root)

    ret_code = real_main(options, target_options, host_port_obj, target_port_obj, url_fetcher, zip_factory, host.scm())
    if not ret_code and log_handler.num_failures:
        ret_code = 1
    print ""
    if ret_code:
        print "Rebaselining failed."
    else:
        print "Rebaselining succeeded."
    return ret_code
Example #6
    def main(self):
        args = sys.argv[1:]

        host = Host()
        host._initialize_scm()

        stderr = self._engage_awesome_stderr_hacks()

        # Checking for the verbose flag before calling check_webkit_style_parser()
        # lets us enable verbose logging earlier.
        is_verbose = "-v" in args or "--verbose" in args

        checker.configure_logging(stream=stderr, is_verbose=is_verbose)
        _log.debug("Verbose logging enabled.")

        parser = checker.check_webkit_style_parser()
        (paths, options) = parser.parse(args)

        configuration = checker.check_webkit_style_configuration(options)

        paths = change_directory(host.filesystem, checkout_root=host.scm().checkout_root, paths=paths)

        style_processor = StyleProcessor(configuration)
        file_reader = TextFileReader(host.filesystem, style_processor)

        if paths and not options.diff_files:
            file_reader.process_paths(paths)
        else:
            changed_files = paths if options.diff_files else None
            patch = host.scm().create_patch(options.git_commit, changed_files=changed_files)
            patch_checker = PatchReader(file_reader)
            patch_checker.check(patch)

        error_count = style_processor.error_count
        file_count = file_reader.file_count
        delete_only_file_count = file_reader.delete_only_file_count

        _log.info("Total errors found: %d in %d files" % (error_count, file_count))
        # We fail when style errors are found, or when nothing was checked at all
        # (no checked files and no delete-only files).
        return error_count > 0 or (file_count == 0 and delete_only_file_count == 0)
Example #7
def get_test_baselines(test_file, test_config):
    # FIXME: This seems like a hack. This only seems to be used to access the Port.expected_baselines logic.
    class AllPlatformsPort(WebKitPort):
        def __init__(self, host):
            WebKitPort.__init__(self, host)
            self._platforms_by_directory = dict([(self._webkit_baseline_path(p), p) for p in test_config.platforms])

        def baseline_search_path(self):
            return self._platforms_by_directory.keys()

        def platform_from_directory(self, directory):
            return self._platforms_by_directory[directory]

    test_path = test_config.filesystem.join(test_config.layout_tests_directory, test_file)

    # FIXME: This should get the Host from the test_config to be mockable!
    host = Host()
    host._initialize_scm()
    host.filesystem = test_config.filesystem
    all_platforms_port = AllPlatformsPort(host)

    all_test_baselines = {}
    for baseline_extension in ('.txt', '.checksum', '.png'):
        test_baselines = test_config.test_port.expected_baselines(test_file, baseline_extension)
        baselines = all_platforms_port.expected_baselines(test_file, baseline_extension, all_baselines=True)
        for platform_directory, expected_filename in baselines:
            if not platform_directory:
                continue
            if platform_directory == test_config.layout_tests_directory:
                platform = 'base'
            else:
                platform = all_platforms_port.platform_from_directory(platform_directory)
            platform_baselines = all_test_baselines.setdefault(platform, {})
            was_used_for_test = (platform_directory, expected_filename) in test_baselines
            platform_baselines[baseline_extension] = was_used_for_test

    return all_test_baselines
Example #8
        test_name = self._port.uri_to_test_name(test_input.uri)

        actual_text = port.expected_text(test_name)
        actual_image = ''
        actual_checksum = ''
        if self._options.pixel_tests and test_input.checksum:
            actual_checksum = port.expected_checksum(test_name)
            if actual_checksum != test_input.checksum:
                actual_image = port.expected_image(test_name)

        self._stdout.write("#URL:%s\n" % test_input.uri)
        if self._options.pixel_tests and test_input.checksum:
            self._stdout.write("#MD5:%s\n" % actual_checksum)
            self._host.filesystem.write_binary_file(self._options.pixel_path,
                                               actual_image)
        self._stdout.write(actual_text)

        # FIXME: (See above FIXME as well). Chromium DRT appears to always
        # ensure the text output has a trailing newline. Mac DRT does not.
        if not actual_text.endswith('\n'):
            self._stdout.write('\n')
        self._stdout.write('#EOF\n')
        self._stdout.flush()


if __name__ == '__main__':
    # FIXME: Why is this using a real Host object instead of MockHost?
    host = Host()
    host._initialize_scm()
    sys.exit(main(sys.argv[1:], host, sys.stdin, sys.stdout, sys.stderr))
Example #9
class PerfTestsRunner(object):
    _perf_tests_base_dir = 'PerformanceTests'
    _result_regex = re.compile('^RESULT .*$')

    def __init__(self,
                 perf_tests_dir,
                 regular_output=sys.stderr,
                 buildbot_output=sys.stdout,
                 args=None):
        self._perf_tests_dir = perf_tests_dir
        self._buildbot_output = buildbot_output
        self._options, self._args = self._parse_args(args)
        self._host = Host()
        self._host._initialize_scm()
        self._port = self._host.port_factory.get(self._options.platform,
                                                 self._options)
        self._printer = printing.Printer(self._port,
                                         self._options,
                                         regular_output,
                                         buildbot_output,
                                         configure_logging=False)
        self._webkit_base_dir_len = len(self._port.webkit_base())

    def _parse_args(self, args=None):
        print_options = printing.print_options()

        perf_option_list = [
            optparse.make_option('--debug',
                                 action='store_const',
                                 const='Debug',
                                 dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release',
                                 action='store_const',
                                 const='Release',
                                 dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option(
                "--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option(
                "--build-directory",
                help=
                "Path to the directory under which build files are kept (should not include configuration)"
            ),
            optparse.make_option("--time-out-ms",
                                 default=30000,
                                 help="Set the timeout for each test"),
        ]

        option_list = (perf_option_list + print_options)
        return optparse.OptionParser(option_list=option_list).parse_args(args)

    def _collect_tests(self, webkit_base, filesystem=None):
        """Return the list of tests found."""
        def _is_test_file(filesystem, dirname, filename):
            return filename.endswith('.html')

        filesystem = filesystem or self._host.filesystem
        base_dir = filesystem.join(webkit_base, self._perf_tests_base_dir,
                                   self._perf_tests_dir)
        return find_files.find(filesystem,
                               base_dir,
                               paths=self._args,
                               file_filter=_is_test_file)

    def run(self):
        if self._options.help_printing:
            self._printer.help_printing()
            self._printer.cleanup()
            return 0

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" %
                       self._port._path_to_driver())
            return -1

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        try:
            tests = self._collect_tests(self._port.webkit_base())
            unexpected = self._run_tests_set(tests, self._port)
        finally:
            self._printer.cleanup()

        return unexpected

    def _run_tests_set(self, tests, port):
        result_count = len(tests)
        expected = 0
        unexpected = 0
        self._printer.print_one_line_summary(result_count, 0, 0)
        driver_need_restart = False
        driver = None

        for test in tests:
            if driver_need_restart:
                _log.debug("%s killing driver" % test)
                driver.stop()
                driver = None
            if not driver:
                driver = port.create_driver(worker_number=1)

            test_failed, driver_need_restart = self._run_single_test(
                test, driver)
            if test_failed:
                unexpected = unexpected + 1
            else:
                expected = expected + 1

            self._printer.print_one_line_summary(result_count, expected,
                                                 unexpected)

        if driver:
            driver.stop()

        return unexpected

    def _run_single_test(self, test, driver):
        test_failed = False
        driver_need_restart = False
        output = driver.run_test(
            DriverInput(test, self._options.time_out_ms, None, False))

        if output.text is None:
            test_failed = True
        elif output.timeout:
            self._printer.write('timeout: %s' %
                                test[self._webkit_base_dir_len + 1:])
            test_failed = True
            driver_need_restart = True
        elif output.crash:
            self._printer.write('crash: %s' %
                                test[self._webkit_base_dir_len + 1:])
            driver_need_restart = True
            test_failed = True
        else:
            got_a_result = False
            for line in re.split('\n', output.text):
                if self._result_regex.match(line):
                    self._buildbot_output.write("%s\n" % line)
                    got_a_result = True
                elif not len(line) == 0:
                    test_failed = True
                    self._printer.write("%s" % line)
            test_failed = test_failed or not got_a_result

        if len(output.error):
            self._printer.write('error:\n%s' % output.error)
            test_failed = True

        return test_failed, driver_need_restart
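_run_single_test() above treats each driver stdout line matching '^RESULT .*$' as a reported measurement and any other non-empty line as a failure, and it also fails when no RESULT line appears at all. Below is a small, self-contained sketch of that filtering logic; the function name and the sample output string are illustrative, not part of the code above.

import re

_result_regex = re.compile('^RESULT .*$')

def summarize_driver_output(text):
    # Mirrors the filtering in _run_single_test(): RESULT lines are collected as
    # measurements; any other non-empty line, or the absence of any RESULT line,
    # marks the run as failed.
    results = []
    failed = False
    for line in re.split('\n', text):
        if _result_regex.match(line):
            results.append(line)
        elif len(line) != 0:
            failed = True
    return results, failed or not results

# Hypothetical usage:
# summarize_driver_output("RESULT group: test= 123 ms\nstray warning")
# returns (['RESULT group: test= 123 ms'], True) because of the stray line.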
Example #10
def main():
    options, args = parse_args()
    host = Host()
    host._initialize_scm()
    port = host.port_factory.get(options.platform, options)
    return run(port, options, args)
Example #11
class PerfTestsRunner(object):
    _perf_tests_base_dir = 'PerformanceTests'
    _test_directories_for_chromium_style_tests = ['inspector']
    _default_branch = 'webkit-trunk'
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3

    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
        self._buildbot_output = buildbot_output
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        print_options = printing.print_options()

        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summaries the results"),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present"),
            ]

        option_list = (perf_option_list + print_options)
        return optparse.OptionParser(option_list=option_list).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        def _is_test_file(filesystem, dirname, filename):
            return filename.endswith('.html')

        skipped_directories = set(['.svn', 'resources'])
        tests = find_files.find(self._host.filesystem, self._base_path, self._args, skipped_directories, _is_test_file)
        return [test for test in tests if not self._port.skips_perf_test(self._port.relative_perf_test_filename(test))]

    def run(self):
        if self._options.help_printing:
            self._printer.help_printing()
            self._printer.cleanup()
            return 0

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        try:
            tests = self._collect_tests()
            unexpected = self._run_tests_set(sorted(list(tests)), self._port)
        finally:
            self._printer.cleanup()

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                branch, options.platform, options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
        contents = {'timestamp': int(timestamp), 'results': self._results}
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        succeeded = False
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError, error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError, error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError, error:
                _log.error("Failed to merge JSON files: %s" % error)
Example #12
class PerfTestsRunner(object):
    _perf_tests_base_dir = 'PerformanceTests'
    _result_regex = re.compile('^RESULT .*$')

    def __init__(self, perf_tests_dir, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None):
        self._perf_tests_dir = perf_tests_dir
        self._buildbot_output = buildbot_output
        self._options, self._args = self._parse_args(args)
        self._host = Host()
        self._host._initialize_scm()
        self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
        self._webkit_base_dir_len = len(self._port.webkit_base())

    def _parse_args(self, args=None):
        print_options = printing.print_options()

        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option("--platform",
                                 help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--build-directory",
                                 help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=30000,
                                 help="Set the timeout for each test"),
            ]

        option_list = (perf_option_list + print_options)
        return optparse.OptionParser(option_list=option_list).parse_args(args)

    def _collect_tests(self, webkit_base, filesystem=None):
        """Return the list of tests found."""

        def _is_test_file(filesystem, dirname, filename):
            return filename.endswith('.html')

        filesystem = filesystem or self._host.filesystem
        base_dir = filesystem.join(webkit_base, self._perf_tests_base_dir, self._perf_tests_dir)
        return find_files.find(filesystem, base_dir, paths=self._args, file_filter=_is_test_file)

    def run(self):
        if self._options.help_printing:
            self._printer.help_printing()
            self._printer.cleanup()
            return 0

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return -1

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        try:
            tests = self._collect_tests(self._port.webkit_base())
            unexpected = self._run_tests_set(tests, self._port)
        finally:
            self._printer.cleanup()

        return unexpected

    def _run_tests_set(self, tests, port):
        result_count = len(tests)
        expected = 0
        unexpected = 0
        self._printer.print_one_line_summary(result_count, 0, 0)
        driver_need_restart = False
        driver = None

        for test in tests:
            if driver_need_restart:
                _log.debug("%s killing driver" % test)
                driver.stop()
                driver = None
            if not driver:
                driver = port.create_driver(worker_number=1)

            test_failed, driver_need_restart = self._run_single_test(test, driver)
            if test_failed:
                unexpected = unexpected + 1
            else:
                expected = expected + 1

            self._printer.print_one_line_summary(result_count, expected, unexpected)

        if driver:
            driver.stop()

        return unexpected

    def _run_single_test(self, test, driver):
        test_failed = False
        driver_need_restart = False
        output = driver.run_test(DriverInput(test, self._options.time_out_ms, None, False))

        if output.text is None:
            test_failed = True
        elif output.timeout:
            self._printer.write('timeout: %s' % test[self._webkit_base_dir_len + 1:])
            test_failed = True
            driver_need_restart = True
        elif output.crash:
            self._printer.write('crash: %s' % test[self._webkit_base_dir_len + 1:])
            driver_need_restart = True
            test_failed = True
        else:
            got_a_result = False
            for line in re.split('\n', output.text):
                if self._result_regex.match(line):
                    self._buildbot_output.write("%s\n" % line)
                    got_a_result = True
                elif not len(line) == 0:
                    test_failed = True
                    self._printer.write("%s" % line)
            test_failed = test_failed or not got_a_result

        if len(output.error):
            self._printer.write('error:\n%s' % output.error)
            test_failed = True

        return test_failed, driver_need_restart
Example #13
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform,
                                                     self._options)
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        perf_option_list = [
            optparse.make_option('--debug',
                                 action='store_const',
                                 const='Debug',
                                 dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release',
                                 action='store_const',
                                 const='Release',
                                 dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option(
                "--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--chromium",
                                 action="store_const",
                                 const='chromium',
                                 dest='platform',
                                 help='Alias for --platform=chromium'),
            optparse.make_option(
                "--builder-name",
                help=
                ("The name of the builder shown on the waterfall running this script e.g. google-mac-2."
                 )),
            optparse.make_option(
                "--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help=
                "Check to ensure the DumpRenderTree build is up-to-date (default)."
            ),
            optparse.make_option(
                "--no-build",
                dest="build",
                action="store_false",
                help=
                "Don't check to see if the DumpRenderTree build is up-to-date."
            ),
            optparse.make_option(
                "--build-directory",
                help=
                "Path to the directory under which build files are kept (should not include configuration)"
            ),
            optparse.make_option("--time-out-ms",
                                 default=600 * 1000,
                                 help="Set the timeout for each test"),
            optparse.make_option(
                "--pause-before-testing",
                dest="pause_before_testing",
                action="store_true",
                default=False,
                help=
                "Pause before running the tests to let user attach a performance monitor."
            ),
            optparse.make_option(
                "--no-results",
                action="store_false",
                dest="generate_results",
                default=True,
                help="Do no generate results JSON and results page."),
            optparse.make_option(
                "--output-json-path",
                help=
                "Path to generate a JSON file at; may contain previous results if it already exists."
            ),
            optparse.make_option(
                "--source-json-path",  # FIXME: Rename it to signify the fact it's a slave configuration.
                help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option(
                "--description",
                help=
                "Add a description to the output JSON file if one is generated"
            ),
            optparse.make_option(
                "--no-show-results",
                action="store_false",
                default=True,
                dest="show_results",
                help=
                "Don't launch a browser with results after the tests are done"
            ),
            optparse.make_option(
                "--test-results-server",
                help=
                "Upload the generated JSON file to the specified server when --output-json-path is present."
            ),
            optparse.make_option(
                "--webkit-test-runner",
                "-2",
                action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay",
                                 dest="replay",
                                 action="store_true",
                                 default=False,
                                 help="Run replay tests."),
            optparse.make_option(
                "--force",
                dest="skipped",
                action="store_true",
                default=False,
                help="Run all tests, including the ones in the Skipped list."),
        ]
        return optparse.OptionParser(
            option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths,
                                     skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(
                path).replace('\\', '/')
            if self._port.skips_perf_test(
                    relative_path) and not self._options.skipped:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path,
                                                    path)
            tests.append(test)

        return tests

    def run(self):
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" %
                       self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self.EXIT_CODE_BAD_PREPARATION

        unexpected = self._run_tests_set(
            sorted(list(tests), key=lambda test: test.test_name()), self._port)
        if self._options.generate_results:
            exit_code = self._generate_and_show_results()
            if exit_code:
                return exit_code

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(),
                                          self._DEFAULT_JSON_FILENAME)

    def _generate_and_show_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp,
                                             options.description,
                                             options.platform,
                                             options.builder_name,
                                             options.build_number)

        if options.source_json_path:
            output = self._merge_slave_config_json(options.source_json_path,
                                                   output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        test_results_server = options.test_results_server
        results_page_path = None
        if not test_results_server:
            output = self._merge_outputs(output_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_MERGE
            results_page_path = self._host.filesystem.splitext(
                output_json_path)[0] + '.html'

        self._generate_output_files(output_json_path, results_page_path,
                                    output)

        if test_results_server:
            if not self._upload_json(test_results_server, output_json_path):
                return self.EXIT_CODE_FAILED_UPLOADING
        elif options.show_results:
            self._port.show_results_html_file(results_page_path)

    def _generate_results_dict(self, timestamp, description, platform,
                               builder_name, build_number):
        contents = {'results': self._results}
        if description:
            contents['description'] = description
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        # FIXME: Add --branch or auto-detect the branch we're in
        for key, value in {
                'timestamp': int(timestamp),
                'branch': self._default_branch,
                'platform': platform,
                'builder-name': builder_name,
                'build-number': int(build_number) if build_number else None
        }.items():
            if value:
                contents[key] = value

        return contents

    def _merge_slave_config_json(self, slave_config_json_path, output):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" %
                       slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(
                slave_config_json_path)
            slave_config = json.load(slave_config_json)
            return dict(slave_config.items() + output.items())
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" %
                       (slave_config_json_path, error))
        return None
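_merge_slave_config_json() above combines the slave configuration with the output dictionary using the Python 2 idiom dict(a.items() + b.items()); when keys collide, the pairs listed later win, so values already present in the output take precedence over the slave configuration. A tiny worked example with made-up values:

# Illustrative values only; later pairs override earlier ones when the dict is built.
slave_config = {'builder-name': 'example-bot', 'platform': 'chromium-mac'}
output = {'platform': 'mac', 'results': {}}
merged = dict(slave_config.items() + output.items())
# merged == {'builder-name': 'example-bot', 'platform': 'mac', 'results': {}}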
Example #14
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3
    _EXIT_CODE_BAD_PREPARATION = -4

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let user attach a performance monitor."),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summaries the results."),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present."),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
                help="Run replay tests."),
            optparse.make_option("--force", dest="skipped", action="store_true", default=False,
                help="Run all tests, including the ones in the Skipped list."),
            ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
            if self._port.skips_perf_test(relative_path) and not self._options.skipped:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def run(self):
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self._EXIT_CODE_BAD_PREPARATION

        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                branch, options.platform, options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
        contents = {'timestamp': int(timestamp), 'results': self._results}
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        succeeded = False
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError, error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError, error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError, error:
                _log.error("Failed to merge JSON files: %s" % error)
Example #15
class PerfTestsRunner(object):
    _perf_tests_base_dir = 'PerformanceTests'
    _test_directories_for_chromium_style_tests = ['inspector']
    _default_branch = 'webkit-trunk'
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3

    def __init__(self,
                 regular_output=sys.stderr,
                 buildbot_output=sys.stdout,
                 args=None,
                 port=None):
        self._buildbot_output = buildbot_output
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform,
                                                     self._options)
        self._host._initialize_scm()
        self._printer = printing.Printer(self._port,
                                         self._options,
                                         regular_output,
                                         buildbot_output,
                                         configure_logging=False)
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        print_options = printing.print_options()

        perf_option_list = [
            optparse.make_option('--debug',
                                 action='store_const',
                                 const='Debug',
                                 dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release',
                                 action='store_const',
                                 const='Release',
                                 dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option(
                "--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option(
                "--builder-name",
                help=
                ("The name of the builder shown on the waterfall running this script e.g. google-mac-2."
                 )),
            optparse.make_option(
                "--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help=
                "Check to ensure the DumpRenderTree build is up-to-date (default)."
            ),
            optparse.make_option(
                "--build-directory",
                help=
                "Path to the directory under which build files are kept (should not include configuration)"
            ),
            optparse.make_option("--time-out-ms",
                                 default=240 * 1000,
                                 help="Set the timeout for each test"),
            optparse.make_option(
                "--output-json-path",
                help="Filename of the JSON file that summaries the results"),
            optparse.make_option(
                "--source-json-path",
                help=
                "Path to a JSON file to be merged into the JSON file when --output-json-path is present"
            ),
            optparse.make_option(
                "--test-results-server",
                help=
                "Upload the generated JSON file to the specified server when --output-json-path is present"
            ),
        ]

        option_list = (perf_option_list + print_options)
        return optparse.OptionParser(option_list=option_list).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""
        def _is_test_file(filesystem, dirname, filename):
            return filename.endswith('.html')

        skipped_directories = set(['.svn', 'resources'])
        tests = find_files.find(self._host.filesystem, self._base_path,
                                self._args, skipped_directories, _is_test_file)
        return [
            test for test in tests if not self._port.skips_perf_test(
                self._port.relative_perf_test_filename(test))
        ]

    def run(self):
        if self._options.help_printing:
            self._printer.help_printing()
            self._printer.cleanup()
            return 0

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" %
                       self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        try:
            tests = self._collect_tests()
            unexpected = self._run_tests_set(sorted(list(tests)), self._port)
        finally:
            self._printer.cleanup()

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(
                options.build_number) if options.build_number else None
            if not self._generate_json(
                    self._timestamp, options.output_json_path,
                    options.source_json_path, branch, options.platform,
                    options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(
                    test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(self, timestamp, output_json_path, source_json_path,
                       branch, platform, builder_name, build_number):
        revision = self._host.scm().head_svn_revision()
        contents = {
            'timestamp': int(timestamp),
            'revision': revision,
            'results': self._results
        }

        for key, value in {
                'branch': branch,
                'platform': platform,
                'builder-name': builder_name,
                'build-number': build_number
        }.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(
                    source_json_path)
                source_json = json.load(source_json_file)
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError, error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError, error:
                _log.error("Failed to parse %s: %s" %
                           (source_json_path, error))
            except TypeError, error:
                _log.error("Failed to merge JSON files: %s" % error)
Example #16
class PerfTestsRunner(object):
    _default_branch = "webkit-trunk"
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        perf_option_list = [
            optparse.make_option(
                "--debug",
                action="store_const",
                const="Debug",
                dest="configuration",
                help="Set the configuration to Debug",
            ),
            optparse.make_option(
                "--release",
                action="store_const",
                const="Release",
                dest="configuration",
                help="Set the configuration to Release",
            ),
            optparse.make_option("--platform", help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option(
                "--chromium",
                action="store_const",
                const="chromium",
                dest="platform",
                help="Alias for --platform=chromium",
            ),
            optparse.make_option(
                "--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2."),
            ),
            optparse.make_option("--build-number", help=("The build number of the builder running this script.")),
            optparse.make_option(
                "--build",
                dest="build",
                action="store_true",
                default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default).",
            ),
            optparse.make_option(
                "--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)",
            ),
            optparse.make_option("--time-out-ms", default=600 * 1000, help="Set the timeout for each test"),
            optparse.make_option(
                "--pause-before-testing",
                dest="pause_before_testing",
                action="store_true",
                default=False,
                help="Pause before running the tests to let user attach a performance monitor.",
            ),
            optparse.make_option("--output-json-path", help="Filename of the JSON file that summaries the results"),
            optparse.make_option(
                "--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present",
            ),
            optparse.make_option(
                "--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present",
            ),
            optparse.make_option(
                "--webkit-test-runner",
                "-2",
                action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree.",
            ),
        ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in [".html", ".svg"]

        filesystem = self._host.filesystem

        paths = []
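        # Keep each argument both as given and relative to the PerformanceTests directory,
        # so either form of a test path matches in find_files.find() below.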
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set([".svn", "resources"])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace("\\", "/")
            if self._port.skips_perf_test(relative_path):
                continue
            tests.append(PerfTestFactory.create_perf_test(relative_path, path))

        return tests

    def run(self):
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            if (
                not self._generate_json(
                    self._timestamp,
                    options.output_json_path,
                    options.source_json_path,
                    branch,
                    options.platform,
                    options.builder_name,
                    build_number,
                )
                and not unexpected
            ):
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(
        self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number
    ):
        contents = {"timestamp": int(timestamp), "results": self._results}
        for (name, path) in self._port.repository_paths():
            contents[name + "-revision"] = self._host.scm().svn_revision(path)

        for key, value in {
            "branch": branch,
            "platform": platform,
            "builder-name": builder_name,
            "build-number": build_number,
        }.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        succeeded = False
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError, error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError, error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError, error:
                _log.error("Failed to merge JSON files: %s" % error)
Exemple #17
0
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    _EXIT_CODE_BAD_BUILD = -1
    _EXIT_CODE_BAD_JSON = -2
    _EXIT_CODE_FAILED_UPLOADING = -3
    _EXIT_CODE_BAD_PREPARATION = -4

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let user attach a performance monitor."),
            optparse.make_option("--output-json-path",
                help="Filename of the JSON file that summaries the results."),
            optparse.make_option("--source-json-path",
                help="Path to a JSON file to be merged into the JSON file when --output-json-path is present."),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
                help="Run replay tests."),
            ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
            if self._port.skips_perf_test(relative_path):
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def run(self):
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self._EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
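            # Give each test a chance to do its one-time setup before the run; abort if any preparation fails.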
            if not test.prepare(self._options.time_out_ms):
                return self._EXIT_CODE_BAD_PREPARATION

        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)

        options = self._options
        if self._options.output_json_path:
            # FIXME: Add --branch or auto-detect the branch we're in
            test_results_server = options.test_results_server
            branch = self._default_branch if test_results_server else None
            build_number = int(options.build_number) if options.build_number else None
            if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
                branch, options.platform, options.builder_name, build_number) and not unexpected:
                return self._EXIT_CODE_BAD_JSON
            if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
                return self._EXIT_CODE_FAILED_UPLOADING

        return unexpected

    def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
        contents = {'timestamp': int(timestamp), 'results': self._results}
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
            if value:
                contents[key] = value

        filesystem = self._host.filesystem
        succeeded = False
        if source_json_path:
            try:
                source_json_file = filesystem.open_text_file_for_reading(source_json_path)
                source_json = json.load(source_json_file)
                contents = dict(source_json.items() + contents.items())
                succeeded = True
            except IOError, error:
                _log.error("Failed to read %s: %s" % (source_json_path, error))
            except ValueError, error:
                _log.error("Failed to parse %s: %s" % (source_json_path, error))
            except TypeError, error:
                _log.error("Failed to merge JSON files: %s" % error)
Exemple #18
0
def main():
    options, args = parse_args()
    host = Host()
    host._initialize_scm()
    port = host.port_factory.get(options.platform, options)
    return run(port, options, args)
Exemple #19
0
class PerfTestsRunner(object):
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host._initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._results = {}
        self._timestamp = time.time()

    @staticmethod
    def _parse_args(args=None):
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--builder-name",
                help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
            optparse.make_option("--build-number",
                help=("The build number of the builder running this script.")),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up-to-date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test"),
            optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
                help="Pause before running the tests to let user attach a performance monitor."),
            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                help="Do no generate results JSON and results page."),
            optparse.make_option("--output-json-path",
                help="Path to generate a JSON file at; may contain previous results if it already exists."),
            optparse.make_option("--source-json-path",  # FIXME: Rename it to signify the fact it's a slave configuration.
                help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option("--description",
                help="Add a description to the output JSON file if one is generated"),
            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
                help="Don't launch a browser with results after the tests are done"),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--webkit-test-runner", "-2", action="store_true",
                help="Use WebKitTestRunner rather than DumpRenderTree."),
            optparse.make_option("--replay", dest="replay", action="store_true", default=False,
                help="Run replay tests."),
            optparse.make_option("--force", dest="skipped", action="store_true", default=False,
                help="Run all tests, including the ones in the Skipped list."),
            ]
        return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        test_extensions = ['.html', '.svg']
        if self._options.replay:
            test_extensions.append('.replay')

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            paths.append(arg)
            relpath = filesystem.relpath(arg, self._base_path)
            if relpath:
                paths.append(relpath)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
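            # Entries in the port's Skipped list are honored unless --force was given.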
            if self._port.skips_perf_test(relative_path) and not self._options.skipped:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
            tests.append(test)

        return tests

    def run(self):
        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        tests = self._collect_tests()
        _log.info("Running %d tests" % len(tests))

        for test in tests:
            if not test.prepare(self._options.time_out_ms):
                return self.EXIT_CODE_BAD_PREPARATION

        unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
        if self._options.generate_results:
            exit_code = self._generate_and_show_results()
            if exit_code:
                return exit_code

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
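        # Otherwise default to PerformanceTestsResults.json inside the port's perf results directory.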
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _generate_and_show_results(self):
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)

        if options.source_json_path:
            output = self._merge_slave_config_json(options.source_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        test_results_server = options.test_results_server
        results_page_path = None
        if not test_results_server:
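            # Local run: fold the new results into any existing output JSON and write a results page next to it.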
            output = self._merge_outputs(output_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_MERGE
            results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'
        else:
            # FIXME: Remove this code once webkit-perf.appspot.com supports "values".
            for result in output['results'].values():
                if isinstance(result, dict) and 'values' in result:
                    del result['values']

        self._generate_output_files(output_json_path, results_page_path, output)

        if test_results_server:
            if not self._upload_json(test_results_server, output_json_path):
                return self.EXIT_CODE_FAILED_UPLOADING
        elif options.show_results:
            self._port.show_results_html_file(results_page_path)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
        contents = {'results': self._results}
        if description:
            contents['description'] = description
        for (name, path) in self._port.repository_paths():
            contents[name + '-revision'] = self._host.scm().svn_revision(path)

        # FIXME: Add --branch or auto-detect the branch we're in
        for key, value in {'timestamp': int(timestamp), 'branch': self._default_branch, 'platform': platform,
            'builder-name': builder_name, 'build-number': int(build_number) if build_number else None}.items():
            if value:
                contents[key] = value

        return contents

    def _merge_slave_config_json(self, slave_config_json_path, output):
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
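            # When keys collide, values from the generated output take precedence over the slave configuration.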
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            return dict(slave_config.items() + output.items())
        except Exception, error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None
Exemple #20
0
class PerfTestsRunner(object):
    _perf_tests_base_dir = 'PerformanceTests'
    _test_directories_for_chromium_style_tests = ['inspector']

    def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None):
        self._buildbot_output = buildbot_output
        self._options, self._args = self._parse_args(args)
        self._host = Host()
        self._host._initialize_scm()
        self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._host.filesystem.join(self._port.webkit_base(), self._perf_tests_base_dir)

    def _parse_args(self, args=None):
        print_options = printing.print_options()

        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                                 help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                                 help='Set the configuration to Release'),
            optparse.make_option("--platform",
                                 help="Specify port/platform being tested (i.e. chromium-mac)"),
            optparse.make_option("--build-directory",
                                 help="Path to the directory under which build files are kept (should not include configuration)"),
            optparse.make_option("--time-out-ms", default=30000,
                                 help="Set the timeout for each test"),
            ]

        option_list = (perf_option_list + print_options)
        return optparse.OptionParser(option_list=option_list).parse_args(args)

    def _collect_tests(self):
        """Return the list of tests found."""

        def _is_test_file(filesystem, dirname, filename):
            return filename.endswith('.html')

        return find_files.find(self._host.filesystem, self._base_path, paths=self._args, file_filter=_is_test_file)

    def run(self):
        if self._options.help_printing:
            self._printer.help_printing()
            self._printer.cleanup()
            return 0

        if not self._port.check_build(needs_http=False):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return -1

        # We wrap any parts of the run that are slow or likely to raise exceptions
        # in a try/finally to ensure that we clean up the logging configuration.
        unexpected = -1
        try:
            tests = self._collect_tests()
            unexpected = self._run_tests_set(tests, self._port)
        finally:
            self._printer.cleanup()

        return unexpected

    def _print_status(self, tests, expected, unexpected):
        if len(tests) == expected + unexpected:
            status = "Ran %d tests" % len(tests)
        else:
            status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
        if unexpected:
            status += " (%d didn't run)" % unexpected
        self._printer.write(status)

    def _run_tests_set(self, tests, port):
        result_count = len(tests)
        expected = 0
        unexpected = 0
        driver_need_restart = False
        driver = None
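        # Restart the driver whenever the previous test timed out or crashed, so one bad test cannot poison the rest of the run.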

        for test in tests:
            if driver_need_restart:
                _log.debug("%s killing driver" % test)
                driver.stop()
                driver = None
            if not driver:
                driver = port.create_driver(worker_number=1)

            relative_test_path = self._host.filesystem.relpath(test, self._base_path)
            self._printer.write('Running %s (%d of %d)' % (relative_test_path, expected + unexpected + 1, len(tests)))

            is_chromium_style = self._host.filesystem.split(relative_test_path)[0] in self._test_directories_for_chromium_style_tests
            test_failed, driver_need_restart = self._run_single_test(test, driver, is_chromium_style)
            if test_failed:
                unexpected = unexpected + 1
            else:
                expected = expected + 1

            self._printer.write('')

        if driver:
            driver.stop()

        return unexpected

    _inspector_result_regex = re.compile('^RESULT .*$')

    def _process_chromium_style_test_result(self, test, output):
        test_failed = False
        got_a_result = False
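        # A chromium-style test passes only if it emits at least one RESULT line and nothing else.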
        for line in re.split('\n', output.text):
            if self._inspector_result_regex.match(line):
                self._buildbot_output.write("%s\n" % line)
                got_a_result = True
            elif line:
                test_failed = True
                self._printer.write("%s" % line)
        return test_failed or not got_a_result

    _lines_to_ignore_in_parser_result = [
        re.compile(r'^Running \d+ times$'),
        re.compile(r'^Ignoring warm-up '),
        re.compile(r'^\d+$'),
    ]

    def _should_ignore_line_in_parser_test_result(self, line):
        if not line:
            return True
        for regex in self._lines_to_ignore_in_parser_result:
            if regex.match(line):
                return True
        return False

    def _process_parser_test_result(self, test, output):
        got_a_result = False
        test_failed = False
        filesystem = self._host.filesystem
        category, test_name = filesystem.split(filesystem.relpath(test, self._base_path))
        test_name = filesystem.splitext(test_name)[0]
        results = {}
        keys = ['avg', 'median', 'stdev', 'min', 'max']
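        # Parser-style tests print one '<statistic> <value>' line per statistic; all five must be present for the test to pass.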
        score_regex = re.compile(r'^(' + r'|'.join(keys) + r')\s+([0-9\.]+)')
        for line in re.split('\n', output.text):
            score = score_regex.match(line)
            if score:
                results[score.group(1)] = score.group(2)
                continue

            if not self._should_ignore_line_in_parser_test_result(line):
                test_failed = True
                self._printer.write("%s" % line)

        if test_failed or set(keys) != set(results.keys()):
            return True
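        # Report in the buildbot format: 'RESULT <category>: <name>= <avg> ms', then the remaining statistics on the next line.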
        self._buildbot_output.write('RESULT %s: %s= %s ms\n' % (category, test_name, results['avg']))
        self._buildbot_output.write(', '.join(['%s= %s ms' % (key, results[key]) for key in keys[1:]]) + '\n')
        return False

    def _run_single_test(self, test, driver, is_chromium_style):
        test_failed = False
        driver_need_restart = False
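        # Feed the test to the port's driver and classify the outcome below: missing output, timeout, crash, or normal text.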
        output = driver.run_test(DriverInput(test, self._options.time_out_ms, None, False))

        if output.text is None:
            test_failed = True
        elif output.timeout:
            self._printer.write('timeout: %s' % test[self._webkit_base_dir_len + 1:])
            test_failed = True
            driver_need_restart = True
        elif output.crash:
            self._printer.write('crash: %s' % test[self._webkit_base_dir_len + 1:])
            driver_need_restart = True
            test_failed = True
        else:
            if is_chromium_style:
                test_failed = self._process_chromium_style_test_result(test, output)
            else:
                test_failed = self._process_parser_test_result(test, output)

        if output.error:
            self._printer.write('error:\n%s' % output.error)
            test_failed = True

        if test_failed:
            self._printer.write('FAILED')

        return test_failed, driver_need_restart