Example no. 1
0
    def __init__(self, host, port_name, **kwargs):
        """Initialize the port and set up leak detection."""
        ApplePort.__init__(self, host, port_name, **kwargs)
        self._leak_detector = LeakDetector(self)
        # MallocStackLogging makes DumpRenderTree noticeably slower, so keep
        # leak-checking batches to roughly 1000 tests.
        if self.get_option("leaks"):
            self.set_option_default("batch_size", 1000)
Example no. 2
0
    def __init__(self, *args, **kwargs):
        """Initialize the simulator port and set up leak detection."""
        super(IOSSimulatorPort, self).__init__(*args, **kwargs)
        self._leak_detector = LeakDetector(self)
        # With MallocStackLogging on, DumpRenderTree gets noticeably slower
        # past ~1000 tests per batch, so cap the batch size for leaks runs.
        if self.get_option("leaks"):
            self.set_option_default("batch_size", 1000)
Example no. 3
0
    def __init__(self, host, port_name, **kwargs):
        """Construct the port; enables batched runs when leak checking."""
        ApplePort.__init__(self, host, port_name, **kwargs)
        self._leak_detector = LeakDetector(self)
        if self.get_option("leaks"):
            # Leak checking (MallocStackLogging) slows DumpRenderTree down
            # noticeably beyond ~1000 tests per batch.
            self.set_option_default("batch_size", 1000)
Example no. 4
0
    def __init__(self, *args, **kwargs):
        """Initialise the simulator port: architecture, leak detector, and
        the Mac build directory used to locate host-side tools."""
        super(IOSSimulatorPort, self).__init__(*args, **kwargs)

        # Default to 64-bit Intel when no --architecture was given.
        self._architecture = self.get_option('architecture') or 'x86_64'

        self._leak_detector = LeakDetector(self)
        # MallocStackLogging makes DumpRenderTree noticeably slower, so keep
        # leak-checking batches to roughly 1000 tests.
        if self.get_option("leaks"):
            self.set_option_default("batch_size", 1000)

        mac_config = port_config.Config(self._executive, self._filesystem, 'mac')
        self._mac_build_directory = mac_config.build_directory(self.get_option('configuration'))

        self._testing_device = None
Example no. 5
0
class MacPort(ApplePort):
    # Port implementation for Mac OS X: adds leak detection, architecture
    # selection, and Mac-specific helper/crash-log handling on top of ApplePort.
    port_name = "mac"

    # Oldest-to-newest version-specific ports used for baseline fallback.
    VERSION_FALLBACK_ORDER = ['mac-snowleopard', 'mac-lion', 'mac-mountainlion']

    ARCHITECTURES = ['x86_64', 'x86']

    def __init__(self, host, port_name, **kwargs):
        """Initialize the port; the architecture defaults to x86_64."""
        ApplePort.__init__(self, host, port_name, **kwargs)
        self._architecture = self.get_option('architecture')

        if not self._architecture:
            self._architecture = 'x86_64'

        self._leak_detector = LeakDetector(self)
        if self.get_option("leaks"):
            # DumpRenderTree slows down noticeably if we run more than about 1000 tests in a batch
            # with MallocStackLogging enabled.
            self.set_option_default("batch_size", 1000)

    def default_timeout_ms(self):
        """Per-test timeout; guard-malloc runs are much slower, so allow 350s."""
        if self.get_option('guard_malloc'):
            return 350 * 1000
        return super(MacPort, self).default_timeout_ms()

    def supports_per_test_timeout(self):
        return True

    def _build_driver_flags(self):
        # Build a 32-bit driver when testing the x86 architecture.
        return ['ARCHS=i386'] if self.architecture() == 'x86' else []

    def should_retry_crashes(self):
        # On Apple Mac, we retry crashes due to https://bugs.webkit.org/show_bug.cgi?id=82233
        return True

    def default_baseline_search_path(self):
        """Build the ordered list of baseline directories for this port.

        Version-specific directories come first (from this version through the
        fallback chain), ending with the generic port directory; WebKit2 runs
        get wk2-specific directories prepended.
        """
        name = self._name.replace('-wk2', '')
        if name.endswith(self.FUTURE_VERSION):
            fallback_names = [self.port_name]
        else:
            fallback_names = self.VERSION_FALLBACK_ORDER[self.VERSION_FALLBACK_ORDER.index(name):-1] + [self.port_name]
        if self.get_option('webkit_test_runner'):
            fallback_names = [self._wk2_port_name(), 'wk2'] + fallback_names
        return map(self._webkit_baseline_path, fallback_names)

    def _port_specific_expectations_files(self):
        # TestExpectations files in reverse baseline-search order.
        return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in self.baseline_search_path()]))

    def setup_environ_for_server(self, server_name=None):
        """Augment the server environment with Mac-specific variables.

        For the test driver itself, enable MallocStackLogging when leak
        checking, and inject the WebCore test shim (optionally together with
        guard malloc) via DYLD_INSERT_LIBRARIES.
        """
        env = super(MacPort, self).setup_environ_for_server(server_name)
        if server_name == self.driver_name():
            if self.get_option('leaks'):
                env['MallocStackLogging'] = '1'
            if self.get_option('guard_malloc'):
                env['DYLD_INSERT_LIBRARIES'] = '/usr/lib/libgmalloc.dylib:' + self._build_path("libWebCoreTestShim.dylib")
            else:
                env['DYLD_INSERT_LIBRARIES'] = self._build_path("libWebCoreTestShim.dylib")
        env['XML_CATALOG_FILES'] = ''  # work around missing /etc/catalog <rdar://problem/4292995>
        return env

    def operating_system(self):
        return 'mac'

    # Belongs on a Platform object.
    def is_snowleopard(self):
        """True when running on Mac OS X 10.6 (Snow Leopard)."""
        return self._version == "snowleopard"

    # Belongs on a Platform object.
    def is_lion(self):
        """True when running on Mac OS X 10.7 (Lion)."""
        return self._version == "lion"

    def default_child_processes(self):
        """Return how many driver instances to run in parallel.

        Starts from the generic default, throttles for WK2 resource contention
        and for leaks runs, then caps the count by available physical memory.
        """
        if self._version == "snowleopard":
            _log.warning("Cannot run tests in parallel on Snow Leopard due to rdar://problem/10621525.")
            return 1

        default_count = super(MacPort, self).default_child_processes()

        # FIXME: https://bugs.webkit.org/show_bug.cgi?id=95906  With too many WebProcess WK2 tests get stuck in resource contention.
        # To alleviate the issue reduce the number of running processes
        # Anecdotal evidence suggests that a 4 core/8 core logical machine may run into this, but that a 2 core/4 core logical machine does not.
        should_throttle_for_wk2 = self.get_option('webkit_test_runner') and default_count > 4
        # We also want to throttle for leaks bots.
        if should_throttle_for_wk2 or self.get_option('leaks'):
            default_count = int(.75 * default_count)

        # Make sure we have enough ram to support that many instances:
        total_memory = self.host.platform.total_bytes_memory()
        if total_memory:
            bytes_per_drt = 256 * 1024 * 1024  # Assume each DRT needs 256MB to run.
            overhead = 2048 * 1024 * 1024  # Assume we need 2GB free for the O/S
            # Floor division keeps the count an integer (plain '/' would yield
            # a float under Python 3 division semantics).
            supportable_instances = max((total_memory - overhead) // bytes_per_drt, 1)  # Always use one process, even if we don't have space for it.
            if supportable_instances < default_count:
                _log.warning("This machine could support %s child processes, but only has enough memory for %s." % (default_count, supportable_instances))
        else:
            _log.warning("Cannot determine available memory for child processes, using default child process count of %s." % default_count)
            supportable_instances = default_count
        return min(supportable_instances, default_count)

    def _build_java_test_support(self):
        """Build the support files for the java LayoutTests; False on failure."""
        java_tests_path = self._filesystem.join(self.layout_tests_dir(), "java")
        build_java = [self.make_command(), "-C", java_tests_path]
        if self._executive.run_command(build_java, return_exit_code=True):  # Paths are absolute, so we don't need to set a cwd.
            _log.error("Failed to build Java support files: %s" % build_java)
            return False
        return True

    def check_for_leaks(self, process_name, process_pid):
        """Run the leak detector against one process, if --leaks is enabled."""
        if not self.get_option('leaks'):
            return
        # We could use http://code.google.com/p/psutil/ to get the process_name from the pid.
        self._leak_detector.check_for_leaks(process_name, process_pid)

    def print_leaks_summary(self):
        """Log aggregate leak totals from the leaks files in the results dir."""
        if not self.get_option('leaks'):
            return
        # We're in the manager process, so the leak detector will not have a valid list of leak files.
        # FIXME: This is a hack, but we don't have a better way to get this information from the workers yet.
        # FIXME: This will include too many leaks in subsequent runs until the results directory is cleared!
        leaks_files = self._leak_detector.leaks_files_in_directory(self.results_directory())
        if not leaks_files:
            return
        total_bytes_string, unique_leaks = self._leak_detector.count_total_bytes_and_unique_leaks(leaks_files)
        total_leaks = self._leak_detector.count_total_leaks(leaks_files)
        _log.info("%s total leaks found for a total of %s!" % (total_leaks, total_bytes_string))
        _log.info("%s unique leaks found!" % unique_leaks)

    def _check_port_build(self):
        # Java test support is only required when java tests will be run.
        return self.get_option('nojava') or self._build_java_test_support()

    def _path_to_webcore_library(self):
        # Path of the WebCore binary inside the built framework.
        return self._build_path('WebCore.framework/Versions/A/WebCore')

    def show_results_html_file(self, results_filename):
        """Open the results page in Safari without waiting for it to exit.

        We don't use self._run_script() because we don't want to wait for the
        script to exit, and we want launch errors to show up on stdout.
        """
        # The previous code used file(os.devnull), which opens read-only (so
        # the child's writes to it would fail) and leaked both handles. Open
        # for writing and close our copies once the child has inherited them.
        with open(os.devnull, 'w') as devnull:
            self._executive.popen([self.path_to_script('run-safari')] + self._arguments_for_configuration() + ['--no-saved-state', '-NSOpen', results_filename],
                cwd=self.webkit_base(), stdout=devnull, stderr=devnull)

    # FIXME: The next two routines turn off the http locking in order
    # to work around failures on the bots caused when the slave restarts.
    # See https://bugs.webkit.org/show_bug.cgi?id=64886 for more info.
    # The proper fix is to make sure the slave is actually stopping NRWT
    # properly on restart. Note that by removing the lock file and not waiting,
    # the result should be that if there is a web server already running,
    # it'll be killed and this one will be started in its place; this
    # may lead to weird things happening in the other run. However, I don't
    # think we're (intentionally) actually running multiple runs concurrently
    # on any Mac bots.

    def acquire_http_lock(self):
        # Deliberately a no-op on Mac; see the comment above about bot restarts.
        pass

    def release_http_lock(self):
        # Deliberately a no-op on Mac; see the comment above about bot restarts.
        pass

    def sample_file_path(self, name, pid):
        """Path of the sample report for name/pid in the results directory."""
        filename = "{0}-{1}-sample.txt".format(name, pid)
        return self._filesystem.join(self.results_directory(), filename)

    def _get_crash_log(self, name, pid, stdout, stderr, newer_than, time_fn=None, sleep_fn=None, wait_for_log=True):
        """Poll for a crash log for (name, pid); returns (stderr, log or None).

        time_fn and sleep_fn are injectable for testing.
        """
        # Note that we do slow-spin here and wait, since it appears the time
        # ReportCrash takes to actually write and flush the file varies when there are
        # lots of simultaneous crashes going on.
        # FIXME: Should most of this be moved into CrashLogs()?
        time_fn = time_fn or time.time
        sleep_fn = sleep_fn or time.sleep
        crash_log = ''
        crash_logs = CrashLogs(self.host)
        now = time_fn()
        # FIXME: delete this after we're sure this code is working ...
        _log.debug('looking for crash log for %s:%s' % (name, str(pid)))
        deadline = now + 5 * int(self.get_option('child_processes', 1))
        while not crash_log and now <= deadline:
            crash_log = crash_logs.find_newest_log(name, pid, include_errors=True, newer_than=newer_than)
            if not wait_for_log:
                break
            # Keep polling while we only have error placeholder lines.
            if not crash_log or not [line for line in crash_log.splitlines() if not line.startswith('ERROR')]:
                sleep_fn(0.1)
                now = time_fn()

        if not crash_log:
            return (stderr, None)
        return (stderr, crash_log)

    def look_for_new_crash_logs(self, crashed_processes, start_time):
        """Do a second, non-blocking pass for crash logs after the run ends.

        crashed_processes holds (test_name, process_name, pid) tuples; only
        logs newer than start_time are considered.
        """
        found = {}
        for test_name, process_name, pid in crashed_processes:
            # stdout/stderr are passed as None: any output was already
            # collected while the test itself was running.
            log = self._get_crash_log(process_name, pid, None, None, start_time, wait_for_log=False)[1]
            if log:
                found[test_name] = log
        return found

    def look_for_new_samples(self, unresponsive_processes, start_time):
        """Map test names to existing sample files for unresponsive processes."""
        return dict(
            (test_name, self.sample_file_path(process_name, pid))
            for test_name, process_name, pid in unresponsive_processes
            if self._filesystem.isfile(self.sample_file_path(process_name, pid)))

    def sample_process(self, name, pid):
        """Run /usr/bin/sample against pid, writing a report to the results dir."""
        try:
            hang_report = self.sample_file_path(name, pid)
            # NOTE(review): pid and the two 10s (duration/interval args) are
            # ints; this relies on the executive stringifying argv entries —
            # confirm run_command does so.
            self._executive.run_command([
                "/usr/bin/sample",
                pid,
                10,
                10,
                "-file",
                hang_report,
            ])
        except ScriptError as e:
            _log.warning('Unable to sample process:' + str(e))

    def _path_to_helper(self):
        # Path of the LayoutTestHelper binary in the build directory.
        binary_name = 'LayoutTestHelper'
        return self._build_path(binary_name)

    def start_helper(self):
        """Launch LayoutTestHelper and wait for its 'ready' handshake line."""
        helper_path = self._path_to_helper()
        if helper_path:
            _log.debug("Starting layout helper %s" % helper_path)
            self._helper = self._executive.popen([helper_path],
                stdin=self._executive.PIPE, stdout=self._executive.PIPE, stderr=None)
            is_ready = self._helper.stdout.readline()
            if not is_ready.startswith('ready'):
                _log.error("LayoutTestHelper failed to be ready")

    def stop_helper(self):
        """Ask LayoutTestHelper to exit cleanly and reap the process."""
        if self._helper:
            _log.debug("Stopping LayoutTestHelper")
            try:
                self._helper.stdin.write("x\n")
                self._helper.stdin.close()
                self._helper.wait()
            # 'except IOError, e' is legacy comma syntax (removed in Python 3)
            # and inconsistent with the 'as' form used elsewhere in this file.
            except IOError as e:
                _log.debug("IOError raised while stopping helper: %s" % str(e))
            self._helper = None
 def _make_detector(self):
     # Stray test-harness fragment (note the 1-space indent from scraping):
     # builds a LeakDetector bound to a mock port.
     return LeakDetector(self._mock_port())
Example no. 7
0
class DarwinPort(ApplePort):
    # Shared behaviour for Apple's Darwin-based ports: leak detection,
    # crash-log collection, process sampling, and xcrun tool lookups.

    # SDK name passed to 'xcrun --sdk'; concrete subclasses set this.
    SDK = None

    def __init__(self, host, port_name, **kwargs):
        """Initialize the port and set up leak detection."""
        ApplePort.__init__(self, host, port_name, **kwargs)

        self._leak_detector = LeakDetector(self)
        if self.get_option("leaks"):
            # DumpRenderTree slows down noticeably if we run more than about 1000 tests in a batch
            # with MallocStackLogging enabled.
            self.set_option_default("batch_size", 1000)

    def default_timeout_ms(self):
        """Per-test timeout; guard-malloc runs are much slower, so allow 350s."""
        if self.get_option('guard_malloc'):
            return 350 * 1000
        return super(DarwinPort, self).default_timeout_ms()

    def _port_specific_expectations_files(self):
        # TestExpectations files, reversed relative to baseline_search_path().
        return list(
            reversed([
                self._filesystem.join(self._webkit_baseline_path(p),
                                      'TestExpectations')
                for p in self.baseline_search_path()
            ]))

    def check_for_leaks(self, process_name, process_pid):
        """Run the leak detector against one process, if --leaks is enabled."""
        if not self.get_option('leaks'):
            return
        # We could use http://code.google.com/p/psutil/ to get the process_name from the pid.
        self._leak_detector.check_for_leaks(process_name, process_pid)

    def print_leaks_summary(self):
        """Log aggregate leak totals from the leaks files in the results dir."""
        if not self.get_option('leaks'):
            return
        # We're in the manager process, so the leak detector will not have a valid list of leak files.
        # FIXME: This is a hack, but we don't have a better way to get this information from the workers yet.
        # FIXME: This will include too many leaks in subsequent runs until the results directory is cleared!
        leaks_files = self._leak_detector.leaks_files_in_directory(
            self.results_directory())
        if not leaks_files:
            return
        total_bytes_string, unique_leaks = self._leak_detector.count_total_bytes_and_unique_leaks(
            leaks_files)
        total_leaks = self._leak_detector.count_total_leaks(leaks_files)
        _log.info("%s total leaks found for a total of %s." %
                  (total_leaks, total_bytes_string))
        _log.info("%s unique leaks found." % unique_leaks)

    def _path_to_webcore_library(self):
        # Path of the WebCore binary inside the built framework.
        return self._build_path('WebCore.framework/Versions/A/WebCore')

    def show_results_html_file(self, results_filename):
        """Open the results page in Safari without waiting for it to exit.

        We don't use self._run_script() because we don't want to wait for the
        script to exit, and we want launch errors to show up on stdout.
        """
        # The previous code used file(os.devnull), which opens read-only (so
        # the child's writes would fail) and leaked both handles. Open for
        # writing and close our copies once the child has inherited them.
        with open(os.devnull, 'w') as devnull:
            self._executive.popen(
                [self.path_to_script('run-safari')] +
                self._arguments_for_configuration() +
                ['--no-saved-state', '-NSOpen', results_filename],
                cwd=self.webkit_base(),
                stdout=devnull,
                stderr=devnull)

    def _merge_crash_logs(self, logs, new_logs, crashed_processes):
        """Merge newly-found crash logs into *logs*, skipping known crashes.

        new_logs maps '<process>-<pid>' keys to log text; entries matching a
        tuple in crashed_processes were already collected and are ignored.
        """
        for test, crash_log in new_logs.iteritems():
            try:
                process_name = test.split("-")[0]
                pid = int(test.split("-")[1])
            # int() raises ValueError for a non-numeric pid field; catching
            # only IndexError let malformed keys crash the whole merge.
            except (IndexError, ValueError):
                continue
            if not any(entry[1] == process_name and entry[2] == pid
                       for entry in crashed_processes):
                # if this is a new crash, then append the logs
                logs[test] = crash_log
        return logs

    def _look_for_all_crash_logs_in_log_dir(self, newer_than):
        # Sweep the whole crash-log directory for anything newer than the cutoff.
        crash_log = CrashLogs(self.host)
        return crash_log.find_all_logs(include_errors=True,
                                       newer_than=newer_than)

    def _get_crash_log(self,
                       name,
                       pid,
                       stdout,
                       stderr,
                       newer_than,
                       time_fn=None,
                       sleep_fn=None,
                       wait_for_log=True):
        """Delegate crash-log retrieval to the superclass.

        Forward the optional timing/waiting arguments as well: previously they
        were accepted but silently dropped, so callers passing
        wait_for_log=False (e.g. look_for_new_crash_logs below) still blocked
        waiting for a log to appear.
        """
        return super(DarwinPort, self)._get_crash_log(
            name, pid, stdout, stderr, newer_than,
            time_fn=time_fn, sleep_fn=sleep_fn, wait_for_log=wait_for_log)

    def look_for_new_crash_logs(self, crashed_processes, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress do a second pass at the end of the test run.

           crashes: test_name -> pid, process_name tuple of crashed process
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crash_logs = {}
        for (test_name, process_name, pid) in crashed_processes:
            # Passing None for output.  This is a second pass after the test finished so
            # if the output had any logging we would have already collected it.
            crash_log = self._get_crash_log(process_name,
                                            pid,
                                            None,
                                            None,
                                            start_time,
                                            wait_for_log=False)[1]
            if not crash_log:
                continue
            crash_logs[test_name] = crash_log
        # Also merge in crashes that only show up in the log directory.
        all_crash_log = self._look_for_all_crash_logs_in_log_dir(start_time)
        return self._merge_crash_logs(crash_logs, all_crash_log,
                                      crashed_processes)

    def sample_process(self, name, pid, target_host=None):
        """Capture a report of a (possibly hung) process.

        Tries spindump first (via non-interactive sudo); if that fails, falls
        back to /usr/bin/sample. The report is produced in a temp dir on
        target_host (or self.host) and moved to the results directory on the
        base host.
        """
        host = target_host or self.host
        tempdir = host.filesystem.mkdtemp()
        # NOTE(review): pid and the two 10s are passed as ints; this relies on
        # the executive stringifying argv entries — confirm run_command does so.
        exit_status = host.executive.run_command([
            "/usr/bin/sudo",
            "-n",
            "/usr/sbin/spindump",
            pid,
            10,
            10,
            "-file",
            DarwinPort.spindump_file_path(host, name, pid, str(tempdir)),
        ],
                                                 return_exit_code=True)
        if exit_status:
            try:
                host.executive.run_command([
                    "/usr/bin/sample",
                    pid,
                    10,
                    10,
                    "-file",
                    DarwinPort.sample_file_path(host, name, pid, str(tempdir)),
                ])
                host.filesystem.move_to_base_host(
                    DarwinPort.sample_file_path(host, name, pid, str(tempdir)),
                    DarwinPort.sample_file_path(self.host, name, pid,
                                                self.results_directory()))
            except ScriptError as e:
                _log.warning('Unable to sample process:' + str(e))
        else:
            host.filesystem.move_to_base_host(
                DarwinPort.spindump_file_path(host, name, pid, str(tempdir)),
                DarwinPort.spindump_file_path(self.host, name, pid,
                                              self.results_directory()))
        host.filesystem.rmtree(str(tempdir))

    @staticmethod
    def sample_file_path(host, name, pid, directory):
        """Path of the sample report for name/pid under *directory*."""
        filename = "{0}-{1}-sample.txt".format(name, pid)
        return host.filesystem.join(directory, filename)

    @staticmethod
    def spindump_file_path(host, name, pid, directory):
        """Path of the spindump report for name/pid under *directory*."""
        filename = "{0}-{1}-spindump.txt".format(name, pid)
        return host.filesystem.join(directory, filename)

    def look_for_new_samples(self, unresponsive_processes, start_time):
        """Map test names to sample or spindump reports in the results dir."""
        sample_files = {}
        for (test_name, process_name, pid) in unresponsive_processes:
            sample_file = DarwinPort.sample_file_path(self.host, process_name,
                                                      pid,
                                                      self.results_directory())
            if self._filesystem.isfile(sample_file):
                sample_files[test_name] = sample_file
            else:
                # Fall back to the spindump report when no sample exists.
                spindump_file = DarwinPort.spindump_file_path(
                    self.host, process_name, pid, self.results_directory())
                if self._filesystem.isfile(spindump_file):
                    sample_files[test_name] = spindump_file
        return sample_files

    def make_command(self):
        """Path to make for the selected SDK, falling back to /usr/bin/make."""
        return self.xcrun_find('make', '/usr/bin/make')

    def nm_command(self):
        """Path to nm for the selected SDK."""
        return self.xcrun_find('nm', 'nm')

    def xcrun_find(self, command, fallback=None):
        """Locate a developer tool via 'xcrun --sdk <SDK> -find'.

        Returns *fallback* (default: the bare command name) if xcrun fails.
        """
        fallback = fallback or command
        try:
            return self._executive.run_command(
                ['xcrun', '--sdk', self.SDK, '-find', command]).rstrip()
        except ScriptError:
            # Logger.warn is a deprecated alias of warning; use the real name.
            _log.warning("xcrun failed; falling back to '%s'." % fallback)
            return fallback

    @memoized
    def app_identifier_from_bundle(self, app_bundle):
        """Read CFBundleIdentifier from a bundle's Info.plist, or None."""
        plist_path = self._filesystem.join(app_bundle, 'Info.plist')
        if not self._filesystem.exists(plist_path):
            # Mac-style bundles keep Info.plist under Contents/.
            plist_path = self._filesystem.join(app_bundle, 'Contents',
                                               'Info.plist')
        if not self._filesystem.exists(plist_path):
            return None
        return self._executive.run_command([
            '/usr/libexec/PlistBuddy', '-c', 'Print CFBundleIdentifier',
            plist_path
        ]).rstrip()
Example no. 8
0
class DarwinPort(ApplePort):
    # Shared behaviour for Apple's Darwin-based ports: leak detection,
    # crash-log collection, process sampling, and xcrun tool lookups.

    # OS version string for the running platform; subclasses set this.
    CURRENT_VERSION = None
    # SDK name passed to 'xcrun --sdk'; subclasses set this.
    SDK = None

    def __init__(self, host, port_name, **kwargs):
        """Initialize the port and set up leak detection."""
        ApplePort.__init__(self, host, port_name, **kwargs)

        self._leak_detector = LeakDetector(self)
        if self.get_option("leaks"):
            # DumpRenderTree slows down noticeably if we run more than about 1000 tests in a batch
            # with MallocStackLogging enabled.
            self.set_option_default("batch_size", 1000)

    def default_timeout_ms(self):
        """Per-test timeout; guard-malloc runs are much slower, so allow 350s."""
        if self.get_option('guard_malloc'):
            return 350 * 1000
        return super(DarwinPort, self).default_timeout_ms()

    def _port_specific_expectations_files(self, device_type=None):
        # TestExpectations files, reversed relative to baseline_search_path().
        return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in self.baseline_search_path(device_type=device_type)]))

    def check_for_leaks(self, process_name, process_id):
        """Run the leak detector against one process, if --leaks is enabled."""
        if not self.get_option('leaks'):
            return

        # We could use http://code.google.com/p/psutil/ to get the process_name from the pid.
        self._leak_detector.check_for_leaks(process_name, process_id)

    def print_leaks_summary(self):
        """Log aggregate leak totals from the leaks files in the results dir."""
        if not self.get_option('leaks'):
            return
        # We're in the manager process, so the leak detector will not have a valid list of leak files.
        # FIXME: This is a hack, but we don't have a better way to get this information from the workers yet.
        # FIXME: This will include too many leaks in subsequent runs until the results directory is cleared!
        leaks_files = self._leak_detector.leaks_files_in_directory(self.results_directory())
        if not leaks_files:
            return
        total_bytes_string, unique_leaks = self._leak_detector.count_total_bytes_and_unique_leaks(leaks_files)
        total_leaks = self._leak_detector.count_total_leaks(leaks_files)
        _log.info("%s total leaks found for a total of %s." % (total_leaks, total_bytes_string))
        _log.info("%s unique leaks found." % unique_leaks)

    def _path_to_webcore_library(self):
        # Path of the WebCore binary inside the built framework.
        return self._build_path('WebCore.framework/Versions/A/WebCore')

    def show_results_html_file(self, results_filename):
        """Open the results page in Safari without waiting for it to exit.

        We don't use self._run_script() because we don't want to wait for the
        script to exit, and we want launch errors to show up on stdout.
        """
        # Open devnull for writing: the child redirects its stdout/stderr
        # there, and the default read-only mode would make those writes fail.
        with open(os.devnull, 'w') as devnull:
            self._executive.popen([self.path_to_script('run-safari')] + self._arguments_for_configuration() + ['--no-saved-state', '-NSOpen', results_filename],
                cwd=self.webkit_base(), stdout=devnull, stderr=devnull)

    @memoized
    def path_to_crash_logs(self):
        """Return the directory the OS writes crash reports into.

        Prefers ~/Library/Logs/DiagnosticReports when present, otherwise
        falls back to ~/Library/Logs/CrashReporter.
        """
        logs_dir = self.host.filesystem.join(
            self.host.filesystem.expanduser('~'), 'Library', 'Logs')
        reports_dir = self.host.filesystem.join(logs_dir, 'DiagnosticReports')
        if self.host.filesystem.exists(reports_dir):
            return reports_dir
        return self.host.filesystem.join(logs_dir, 'CrashReporter')

    def _merge_crash_logs(self, logs, new_logs, crashed_processes):
        """Merge newly-found crash logs into *logs*, skipping known crashes."""
        for test, crash_log in new_logs.iteritems():
            parts = test.split('-')
            try:
                # Sandbox reports are keyed 'Sandbox-<process>-<pid>'.
                if parts[0] == 'Sandbox':
                    process_name = parts[1]
                    pid = int(parts[2])
                else:
                    process_name = parts[0]
                    pid = int(parts[1])
            except (IndexError, ValueError):
                continue
            already_known = any(entry[1] == process_name and entry[2] == pid
                                for entry in crashed_processes)
            if not already_known:
                # A crash we did not see during the run: keep its log.
                logs[test] = crash_log
        return logs

    def _look_for_all_crash_logs_in_log_dir(self, newer_than):
        # Sweep the crash-report directory, skipping logs already attributed
        # to this host.
        crash_log = CrashLogs(self.host, self.path_to_crash_logs(), crash_logs_to_skip=self._crash_logs_to_skip_for_host.get(self.host, []))
        return crash_log.find_all_logs(newer_than=newer_than)

    def _get_crash_log(self, name, pid, stdout, stderr, newer_than, time_fn=None, sleep_fn=None, wait_for_log=True, target_host=None):
        """Poll for a crash log for (name, pid); returns (stderr, log or None).

        time_fn and sleep_fn are injectable for testing; target_host selects
        which host's crash-log directory to search.
        """
        # Note that we do slow-spin here and wait, since it appears the time
        # ReportCrash takes to actually write and flush the file varies when there are
        # lots of simultaneous crashes going on.
        time_fn = time_fn or time.time
        sleep_fn = sleep_fn or time.sleep
        crash_log = ''
        crash_logs = CrashLogs(target_host or self.host, self.path_to_crash_logs(), crash_logs_to_skip=self._crash_logs_to_skip_for_host.get(target_host or self.host, []))
        now = time_fn()
        deadline = now + 5 * int(self.get_option('child_processes', 1))
        while not crash_log and now <= deadline:
            crash_log = crash_logs.find_newest_log(name, pid, include_errors=True, newer_than=newer_than)
            if not wait_for_log:
                break
            # Keep polling while we only have error placeholder lines.
            if not crash_log or not [line for line in crash_log.splitlines() if not line.startswith('ERROR')]:
                sleep_fn(0.1)
                now = time_fn()

        if not crash_log:
            return (stderr, None)
        return (stderr, crash_log)

    def look_for_new_crash_logs(self, crashed_processes, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress do a second pass at the end of the test run.

           crashes: test_name -> pid, process_name tuple of crashed process
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crash_logs = {}
        for (test_name, process_name, pid) in crashed_processes:
            # Passing None for output.  This is a second pass after the test finished so
            # if the output had any logging we would have already collected it.
            crash_log = self._get_crash_log(process_name, pid, None, None, start_time, wait_for_log=False)[1]
            if not crash_log:
                continue
            crash_logs[test_name] = crash_log
        # Also merge in crashes that only show up in the log directory.
        all_crash_log = self._look_for_all_crash_logs_in_log_dir(start_time)
        return self._merge_crash_logs(crash_logs, all_crash_log, crashed_processes)

    def sample_process(self, name, pid, target_host=None):
        """Capture a report of a (possibly hung) process.

        Prefers a tailspin log symbolicated via spindump; if tailspin fails,
        falls back to /usr/bin/sample. Reports are produced in a temp dir on
        target_host (or self.host) and moved to the results directory.
        """
        host = target_host or self.host
        tempdir = host.filesystem.mkdtemp()
        temp_tailspin_file_path = host.filesystem.join(str(tempdir), "{0}-{1}-tailspin-temp.txt".format(name, pid))
        command = [
            '/usr/bin/tailspin',
            'save',
            '-n',
            temp_tailspin_file_path,
        ]
        if host.platform.is_mac():
            # On Mac, run through non-interactive sudo (-n).
            command = ['/usr/bin/sudo', '-n'] + command

        exit_status = host.executive.run_command(command, return_exit_code=True)
        if not exit_status:  # Symbolicate tailspin log using spindump
            spindump_command = [
                '/usr/sbin/spindump',
                '-i', temp_tailspin_file_path,
                '-file', DarwinPort.tailspin_file_path(host, name, pid, str(tempdir)),
            ]
            try:
                exit_code = host.executive.run_command(spindump_command + ['-noBulkSymbolication'], return_exit_code=True)

                # FIXME: Remove the fallback when we no longer support Catalina.
                if exit_code:
                    host.executive.run_command(spindump_command)
                host.filesystem.move_to_base_host(DarwinPort.tailspin_file_path(host, name, pid, str(tempdir)),
                                                  DarwinPort.tailspin_file_path(self.host, name, pid, self.results_directory()))
            except (IOError, ScriptError) as e:
                _log.warning('Unable to symbolicate tailspin log of process:' + str(e))
        else:  # Tailspin failed, run sample instead
            try:
                # NOTE(review): pid/10/10 are ints; this relies on the
                # executive stringifying argv entries — confirm.
                host.executive.run_command([
                    '/usr/bin/sample',
                    pid,
                    10,
                    10,
                    '-file',
                    DarwinPort.sample_file_path(host, name, pid, str(tempdir)),
                ])
                host.filesystem.move_to_base_host(DarwinPort.sample_file_path(host, name, pid, str(tempdir)),
                                                  DarwinPort.sample_file_path(self.host, name, pid, self.results_directory()))
            except ScriptError as e:
                _log.warning('Unable to sample process:' + str(e))
        host.filesystem.rmtree(str(tempdir))

    @staticmethod
    def sample_file_path(host, name, pid, directory):
        """Path of the sample report for name/pid under *directory*."""
        filename = "{0}-{1}-sample.txt".format(name, pid)
        return host.filesystem.join(directory, filename)

    @staticmethod
    def tailspin_file_path(host, name, pid, directory):
        """Path of the tailspin report for name/pid under *directory*."""
        filename = "{0}-{1}-tailspin.txt".format(name, pid)
        return host.filesystem.join(directory, filename)

    def look_for_new_samples(self, unresponsive_processes, start_time):
        """Map test names to sample or tailspin reports in the results dir."""
        sample_files = {}
        for (test_name, process_name, pid) in unresponsive_processes:
            sample_file = DarwinPort.sample_file_path(self.host, process_name, pid, self.results_directory())
            if self._filesystem.isfile(sample_file):
                sample_files[test_name] = sample_file
            else:
                # Fall back to the tailspin report when no sample exists.
                tailspin_file = DarwinPort.tailspin_file_path(self.host, process_name, pid, self.results_directory())
                if self._filesystem.isfile(tailspin_file):
                    sample_files[test_name] = tailspin_file
        return sample_files

    def _path_to_image_diff(self):
        """Locate the ImageDiff binary for Darwin ports.

        ImageDiff for DarwinPorts is a little complicated. It will either be
        in a directory named ../mac relative to the port build directory, in
        a directory named ../<build-type> relative to the port build
        directory, or in the port build directory itself.
        """
        _image_diff_in_build_path = super(DarwinPort, self)._path_to_image_diff()
        _port_build_dir = self.host.filesystem.dirname(_image_diff_in_build_path)

        # Test ../mac
        _path_to_test = self.host.filesystem.join(_port_build_dir, '..', 'mac', 'ImageDiff')
        if self.host.filesystem.exists(_path_to_test):
            return _path_to_test

        # Test ../<build-type>
        _build_type = self.host.filesystem.basename(_port_build_dir).split('-')[0]
        _path_to_test = self.host.filesystem.join(_port_build_dir, '..', _build_type, 'ImageDiff')
        if self.host.filesystem.exists(_path_to_test):
            return _path_to_test

        return _image_diff_in_build_path

    def make_command(self):
        """Path to make for the selected SDK, falling back to /usr/bin/make."""
        return self.xcrun_find('make', '/usr/bin/make')

    def xcrun_find(self, command, fallback=None):
        """Locate a developer tool via 'xcrun --sdk <SDK> -find'.

        Returns *fallback* (default: the bare command name) if xcrun fails.
        """
        fallback = fallback or command
        try:
            return self._executive.run_command(['xcrun', '--sdk', self.SDK, '-find', command]).rstrip()
        except ScriptError:
            # Logger.warn is a deprecated alias of warning; use the real name.
            _log.warning("xcrun failed; falling back to '%s'." % fallback)
            return fallback

    @memoized
    def _plist_data_from_bundle(self, app_bundle, entry):
        """Read *entry* from the bundle's Info.plist via PlistBuddy, or None."""
        plist_path = self._filesystem.join(app_bundle, 'Info.plist')
        if not self._filesystem.exists(plist_path):
            # Mac-style bundles keep Info.plist under Contents/.
            plist_path = self._filesystem.join(app_bundle, 'Contents', 'Info.plist')
        if not self._filesystem.exists(plist_path):
            return None
        return self._executive.run_command(['/usr/libexec/PlistBuddy', '-c', 'Print {}'.format(entry), plist_path]).rstrip()

    def app_identifier_from_bundle(self, app_bundle):
        # The bundle identifier, e.g. CFBundleIdentifier from Info.plist.
        return self._plist_data_from_bundle(app_bundle, 'CFBundleIdentifier')

    def app_executable_from_bundle(self, app_bundle):
        # The executable name inside the bundle (CFBundleExecutable).
        return self._plist_data_from_bundle(app_bundle, 'CFBundleExecutable')

    def environment_for_api_tests(self):
        """Extend the API-test env so dyld finds the freshly built libraries."""
        environment = super(DarwinPort, self).environment_for_api_tests()
        build_root_path = str(self._build_path())
        for name in ['DYLD_LIBRARY_PATH', '__XPC_DYLD_LIBRARY_PATH', 'DYLD_FRAMEWORK_PATH', '__XPC_DYLD_FRAMEWORK_PATH']:
            self._append_value_colon_separated(environment, name, build_root_path)
        return environment
Example no. 9
0
File: mac.py Project: fzbing/webkit
class MacPort(ApplePort):
    """Apple Mac implementation of the Port interface (DumpRenderTree / WebKitTestRunner)."""

    port_name = "mac"

    # Oldest to newest; used to build the baseline fallback chain.
    VERSION_FALLBACK_ORDER = [
        'mac-snowleopard', 'mac-lion', 'mac-mountainlion', 'mac-mavericks',
        'mac-yosemite'
    ]

    ARCHITECTURES = ['x86_64', 'x86']

    DEFAULT_ARCHITECTURE = 'x86_64'

    def __init__(self, host, port_name, **kwargs):
        ApplePort.__init__(self, host, port_name, **kwargs)

        self._leak_detector = LeakDetector(self)
        if self.get_option("leaks"):
            # DumpRenderTree slows down noticably if we run more than about 1000 tests in a batch
            # with MallocStackLogging enabled.
            self.set_option_default("batch_size", 1000)

    def default_timeout_ms(self):
        # Guard Malloc slows the driver down dramatically, so allow much longer per test.
        if self.get_option('guard_malloc'):
            return 350 * 1000
        return super(MacPort, self).default_timeout_ms()

    def supports_per_test_timeout(self):
        return True

    def _build_driver_flags(self):
        return ['ARCHS=i386'] if self.architecture() == 'x86' else []

    def should_retry_crashes(self):
        # On Apple Mac, we retry crashes due to https://bugs.webkit.org/show_bug.cgi?id=82233
        return True

    def default_baseline_search_path(self):
        """Return the ordered list of baseline directories to search for expected results."""
        name = self._name.replace('-wk2', '')
        wk_version = [] if self.get_option('webkit_test_runner') else [
            'mac-wk1'
        ]
        if name.endswith(self.FUTURE_VERSION):
            fallback_names = wk_version + [self.port_name]
        else:
            fallback_names = self.VERSION_FALLBACK_ORDER[
                self.VERSION_FALLBACK_ORDER.index(name):-1] + wk_version + [
                    self.port_name
                ]
        # FIXME: mac-wk2 should appear at the same place as mac-wk1.
        if self.get_option('webkit_test_runner'):
            fallback_names = [self._wk2_port_name(), 'wk2'] + fallback_names
        # Return a concrete list so the result can be iterated more than once
        # (map() is lazy under Python 3).
        return [self._webkit_baseline_path(fallback_name) for fallback_name in fallback_names]

    def _port_specific_expectations_files(self):
        # Least-specific expectations first, so more-specific files win on conflict.
        return list(
            reversed([
                self._filesystem.join(self._webkit_baseline_path(p),
                                      'TestExpectations')
                for p in self.baseline_search_path()
            ]))

    def configuration_specifier_macros(self):
        # Expansion of "os+" specifiers used in TestExpectations files.
        return {
            "mavericks+": ["mavericks", "yosemite", "future"],
            "yosemite+": ["yosemite", "future"],
        }

    def setup_environ_for_server(self, server_name=None):
        env = super(MacPort, self).setup_environ_for_server(server_name)
        if server_name == self.driver_name():
            if self.get_option('leaks'):
                env['MallocStackLogging'] = '1'
            if self.get_option('guard_malloc'):
                env['DYLD_INSERT_LIBRARIES'] = '/usr/lib/libgmalloc.dylib:' + self._build_path(
                    "libWebCoreTestShim.dylib")
            else:
                env['DYLD_INSERT_LIBRARIES'] = self._build_path(
                    "libWebCoreTestShim.dylib")
        env['XML_CATALOG_FILES'] = ''  # work around missing /etc/catalog <rdar://problem/4292995>
        return env

    def operating_system(self):
        return 'mac'

    # Belongs on a Platform object.
    def is_mavericks(self):
        return self._version == 'mavericks'

    def default_child_processes(self):
        """Return the number of test workers, bounded by memory and known OS issues."""
        if self._version == "snowleopard":
            _log.warning(
                "Cannot run tests in parallel on Snow Leopard due to rdar://problem/10621525."
            )
            return 1

        default_count = super(MacPort, self).default_child_processes()

        # FIXME: https://bugs.webkit.org/show_bug.cgi?id=95906  With too many WebProcess WK2 tests get stuck in resource contention.
        # To alleviate the issue reduce the number of running processes
        # Anecdotal evidence suggests that a 4 core/8 core logical machine may run into this, but that a 2 core/4 core logical machine does not.
        should_throttle_for_wk2 = self.get_option(
            'webkit_test_runner') and default_count > 4
        # We also want to throttle for leaks bots.
        if should_throttle_for_wk2 or self.get_option('leaks'):
            default_count = int(.75 * default_count)

        # Make sure we have enough ram to support that many instances:
        total_memory = self.host.platform.total_bytes_memory()
        if total_memory:
            bytes_per_drt = 256 * 1024 * 1024  # Assume each DRT needs 256MB to run.
            overhead = 2048 * 1024 * 1024  # Assume we need 2GB free for the O/S
            # Floor division: a process count must be an integer ('/' would
            # yield a float under Python 3).
            supportable_instances = max(
                (total_memory - overhead) // bytes_per_drt, 1
            )  # Always use one process, even if we don't have space for it.
            if supportable_instances < default_count:
                _log.warning(
                    "This machine could support %s child processes, but only has enough memory for %s."
                    % (default_count, supportable_instances))
        else:
            _log.warning(
                "Cannot determine available memory for child processes, using default child process count of %s."
                % default_count)
            supportable_instances = default_count
        return min(supportable_instances, default_count)

    def _build_java_test_support(self):
        """Build the Java applet test fixtures; return False on build failure."""
        java_tests_path = self._filesystem.join(self.layout_tests_dir(),
                                                "java")
        build_java = [self.make_command(), "-C", java_tests_path]
        if self._executive.run_command(
                build_java, return_exit_code=True
        ):  # Paths are absolute, so we don't need to set a cwd.
            _log.error("Failed to build Java support files: %s" % build_java)
            return False
        return True

    def check_for_leaks(self, process_name, process_pid):
        if not self.get_option('leaks'):
            return
        # We could use http://code.google.com/p/psutil/ to get the process_name from the pid.
        self._leak_detector.check_for_leaks(process_name, process_pid)

    def print_leaks_summary(self):
        if not self.get_option('leaks'):
            return
        # We're in the manager process, so the leak detector will not have a valid list of leak files.
        # FIXME: This is a hack, but we don't have a better way to get this information from the workers yet.
        # FIXME: This will include too many leaks in subsequent runs until the results directory is cleared!
        leaks_files = self._leak_detector.leaks_files_in_directory(
            self.results_directory())
        if not leaks_files:
            return
        total_bytes_string, unique_leaks = self._leak_detector.count_total_bytes_and_unique_leaks(
            leaks_files)
        total_leaks = self._leak_detector.count_total_leaks(leaks_files)
        _log.info("%s total leaks found for a total of %s." %
                  (total_leaks, total_bytes_string))
        _log.info("%s unique leaks found." % unique_leaks)

    def _check_port_build(self):
        return not self.get_option('java') or self._build_java_test_support()

    def _path_to_webcore_library(self):
        return self._build_path('WebCore.framework/Versions/A/WebCore')

    def show_results_html_file(self, results_filename):
        # We don't use self._run_script() because we don't want to wait for the script
        # to exit and we want the output to show up on stdout in case there are errors
        # launching the browser.
        # os.devnull must be opened for *writing* so it can absorb the child's
        # output (file() opened it read-only, and file() does not exist on Python 3).
        self._executive.popen(
            [self.path_to_script('run-safari')] +
            self._arguments_for_configuration() +
            ['--no-saved-state', '-NSOpen', results_filename],
            cwd=self.webkit_base(),
            stdout=open(os.devnull, 'w'),
            stderr=open(os.devnull, 'w'))

    def sample_file_path(self, name, pid):
        return self._filesystem.join(self.results_directory(),
                                     "{0}-{1}-sample.txt".format(name, pid))

    def _get_crash_log(self,
                       name,
                       pid,
                       stdout,
                       stderr,
                       newer_than,
                       time_fn=None,
                       sleep_fn=None,
                       wait_for_log=True):
        """Return (stderr, crash_log) for the given process, polling until a log appears or a deadline passes."""
        # Note that we do slow-spin here and wait, since it appears the time
        # ReportCrash takes to actually write and flush the file varies when there are
        # lots of simultaneous crashes going on.
        # FIXME: Should most of this be moved into CrashLogs()?
        time_fn = time_fn or time.time
        sleep_fn = sleep_fn or time.sleep
        crash_log = ''
        crash_logs = CrashLogs(self.host)
        now = time_fn()
        # FIXME: delete this after we're sure this code is working ...
        _log.debug('looking for crash log for %s:%s' % (name, str(pid)))
        deadline = now + 5 * int(self.get_option('child_processes', 1))
        while not crash_log and now <= deadline:
            crash_log = crash_logs.find_newest_log(name,
                                                   pid,
                                                   include_errors=True,
                                                   newer_than=newer_than)
            if not wait_for_log:
                break
            # Keep polling while the log is empty or contains only ERROR lines.
            if not crash_log or not [
                    line for line in crash_log.splitlines()
                    if not line.startswith('ERROR')
            ]:
                sleep_fn(0.1)
                now = time_fn()

        if not crash_log:
            return (stderr, None)
        return (stderr, crash_log)

    def look_for_new_crash_logs(self, crashed_processes, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress do a second pass at the end of the test run.

           crashes: test_name -> pid, process_name tuple of crashed process
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crash_logs = {}
        for (test_name, process_name, pid) in crashed_processes:
            # Passing None for output.  This is a second pass after the test finished so
            # if the output had any logging we would have already collected it.
            crash_log = self._get_crash_log(process_name,
                                            pid,
                                            None,
                                            None,
                                            start_time,
                                            wait_for_log=False)[1]
            if not crash_log:
                continue
            crash_logs[test_name] = crash_log
        return crash_logs

    def look_for_new_samples(self, unresponsive_processes, start_time):
        """Return a test_name -> sample-file-path map for processes that were sampled."""
        sample_files = {}
        for (test_name, process_name, pid) in unresponsive_processes:
            sample_file = self.sample_file_path(process_name, pid)
            if not self._filesystem.isfile(sample_file):
                continue
            sample_files[test_name] = sample_file
        return sample_files

    def sample_process(self, name, pid):
        """Capture a 10-second /usr/bin/sample of the given process into the results directory."""
        try:
            hang_report = self.sample_file_path(name, pid)
            self._executive.run_command([
                "/usr/bin/sample",
                pid,
                10,
                10,
                "-file",
                hang_report,
            ])
        except ScriptError as e:
            _log.warning('Unable to sample process:' + str(e))

    def _path_to_helper(self):
        binary_name = 'LayoutTestHelper'
        return self._build_path(binary_name)

    def start_helper(self, pixel_tests=False):
        """Launch LayoutTestHelper (installs a known color profile) and wait for its ready line."""
        helper_path = self._path_to_helper()
        if helper_path:
            _log.debug("Starting layout helper %s" % helper_path)
            arguments = [helper_path, '--install-color-profile']
            self._helper = self._executive.popen(arguments,
                                                 stdin=self._executive.PIPE,
                                                 stdout=self._executive.PIPE,
                                                 stderr=None)
            is_ready = self._helper.stdout.readline()
            if not is_ready.startswith('ready'):
                _log.error("LayoutTestHelper failed to be ready")

    def reset_preferences(self):
        """Delete persisted driver preference domains so runs start from a clean state."""
        _log.debug("Resetting persistent preferences")

        for domain in ["DumpRenderTree", "WebKitTestRunner"]:
            try:
                self._executive.run_command(["defaults", "delete", domain])
            except ScriptError as e:
                # 'defaults' returns 1 if the domain did not exist
                if e.exit_code != 1:
                    raise e
# Esempio n. 10
# 0
class DarwinPort(ApplePort):
    """Shared behavior for Apple ports (Mac, iOS) that run on Darwin."""

    SDK = None

    def __init__(self, host, port_name, **kwargs):
        ApplePort.__init__(self, host, port_name, **kwargs)

        self._leak_detector = LeakDetector(self)
        if self.get_option("leaks"):
            # DumpRenderTree slows down noticably if we run more than about 1000 tests in a batch
            # with MallocStackLogging enabled.
            self.set_option_default("batch_size", 1000)

    def default_timeout_ms(self):
        # Guard Malloc slows the driver down dramatically, so allow much longer per test.
        if self.get_option('guard_malloc'):
            return 350 * 1000
        return super(DarwinPort, self).default_timeout_ms()

    def _port_specific_expectations_files(self):
        # Least-specific expectations first, so more-specific files win on conflict.
        return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in self.baseline_search_path()]))

    def check_for_leaks(self, process_name, process_pid):
        if not self.get_option('leaks'):
            return
        # We could use http://code.google.com/p/psutil/ to get the process_name from the pid.
        self._leak_detector.check_for_leaks(process_name, process_pid)

    def print_leaks_summary(self):
        if not self.get_option('leaks'):
            return
        # We're in the manager process, so the leak detector will not have a valid list of leak files.
        # FIXME: This is a hack, but we don't have a better way to get this information from the workers yet.
        # FIXME: This will include too many leaks in subsequent runs until the results directory is cleared!
        leaks_files = self._leak_detector.leaks_files_in_directory(self.results_directory())
        if not leaks_files:
            return
        total_bytes_string, unique_leaks = self._leak_detector.count_total_bytes_and_unique_leaks(leaks_files)
        total_leaks = self._leak_detector.count_total_leaks(leaks_files)
        _log.info("%s total leaks found for a total of %s." % (total_leaks, total_bytes_string))
        _log.info("%s unique leaks found." % unique_leaks)

    def _path_to_webcore_library(self):
        return self._build_path('WebCore.framework/Versions/A/WebCore')

    def show_results_html_file(self, results_filename):
        # We don't use self._run_script() because we don't want to wait for the script
        # to exit and we want the output to show up on stdout in case there are errors
        # launching the browser.
        # os.devnull must be opened for *writing* so it can absorb the child's
        # output (file() opened it read-only, and file() does not exist on Python 3).
        self._executive.popen([self.path_to_script('run-safari')] + self._arguments_for_configuration() + ['--no-saved-state', '-NSOpen', results_filename],
            cwd=self.webkit_base(), stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))

    def _merge_crash_logs(self, logs, new_logs, crashed_processes):
        """Fold crash logs found on disk into *logs*, skipping crashes already attributed to tests."""
        for test, crash_log in new_logs.iteritems():
            # Keys look like "<process_name>-<pid>".
            # NOTE(review): int() on a malformed pid raises ValueError, which is
            # not caught here — confirm upstream keys are always well-formed.
            try:
                process_name = test.split("-")[0]
                pid = int(test.split("-")[1])
            except IndexError:
                continue
            if not any(entry[1] == process_name and entry[2] == pid for entry in crashed_processes):
                # if this is a new crash, then append the logs
                logs[test] = crash_log
        return logs

    def _look_for_all_crash_logs_in_log_dir(self, newer_than):
        crash_log = CrashLogs(self.host)
        return crash_log.find_all_logs(include_errors=True, newer_than=newer_than)

    def _get_crash_log(self, name, pid, stdout, stderr, newer_than, time_fn=None, sleep_fn=None, wait_for_log=True):
        # The base Darwin port has no crash-log lookup of its own; subclasses
        # override this.  Return a (stderr, crash_log) pair rather than bare
        # None so callers such as look_for_new_crash_logs(), which index [1]
        # into the result, do not raise TypeError.
        return (stderr, None)

    def look_for_new_crash_logs(self, crashed_processes, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress do a second pass at the end of the test run.

           crashes: test_name -> pid, process_name tuple of crashed process
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crash_logs = {}
        for (test_name, process_name, pid) in crashed_processes:
            # Passing None for output.  This is a second pass after the test finished so
            # if the output had any logging we would have already collected it.
            crash_log = self._get_crash_log(process_name, pid, None, None, start_time, wait_for_log=False)[1]
            if not crash_log:
                continue
            crash_logs[test_name] = crash_log
        all_crash_log = self._look_for_all_crash_logs_in_log_dir(start_time)
        return self._merge_crash_logs(crash_logs, all_crash_log, crashed_processes)

    def sample_process(self, name, pid):
        """Capture a 10-second report of the process: prefer spindump (needs sudo), fall back to sample."""
        exit_status = self._executive.run_command([
            "/usr/bin/sudo",
            "-n",
            "/usr/sbin/spindump",
            pid,
            10,
            10,
            "-file",
            self.spindump_file_path(name, pid),
        ], return_exit_code=True)
        if exit_status:
            try:
                self._executive.run_command([
                    "/usr/bin/sample",
                    pid,
                    10,
                    10,
                    "-file",
                    self.sample_file_path(name, pid),
                ])
            except ScriptError as e:
                _log.warning('Unable to sample process:' + str(e))

    def sample_file_path(self, name, pid):
        return self._filesystem.join(self.results_directory(), "{0}-{1}-sample.txt".format(name, pid))

    def spindump_file_path(self, name, pid):
        return self._filesystem.join(self.results_directory(), "{0}-{1}-spindump.txt".format(name, pid))

    def look_for_new_samples(self, unresponsive_processes, start_time):
        """Return a test_name -> sample-file-path map for processes that were sampled."""
        sample_files = {}
        for (test_name, process_name, pid) in unresponsive_processes:
            sample_file = self.sample_file_path(process_name, pid)
            if not self._filesystem.isfile(sample_file):
                continue
            sample_files[test_name] = sample_file
        return sample_files

    def make_command(self):
        return self.xcrun_find('make', '/usr/bin/make')

    def nm_command(self):
        return self.xcrun_find('nm', 'nm')

    def xcrun_find(self, command, fallback=None):
        """Locate *command* with xcrun for this port's SDK, returning *fallback* on failure."""
        fallback = fallback or command
        try:
            return self._executive.run_command(['xcrun', '--sdk', self.SDK, '-find', command]).rstrip()
        except ScriptError:
            _log.warn("xcrun failed; falling back to '%s'." % fallback)
            return fallback
# Esempio n. 11
# 0
class MacPort(ApplePort):
    """Apple Mac implementation of the Port interface (El Capitan era)."""

    # Oldest to newest; used to build the baseline fallback chain.
    port_name = "mac"

    VERSION_FALLBACK_ORDER = [
        "mac-snowleopard",
        "mac-lion",
        "mac-mountainlion",
        "mac-mavericks",
        "mac-yosemite",
        "mac-elcapitan",
    ]

    ARCHITECTURES = ["x86_64", "x86"]

    DEFAULT_ARCHITECTURE = "x86_64"

    def __init__(self, host, port_name, **kwargs):
        ApplePort.__init__(self, host, port_name, **kwargs)

        self._leak_detector = LeakDetector(self)
        if self.get_option("leaks"):
            # DumpRenderTree slows down noticably if we run more than about 1000 tests in a batch
            # with MallocStackLogging enabled.
            self.set_option_default("batch_size", 1000)

    def default_timeout_ms(self):
        # Guard Malloc slows the driver down dramatically, so allow much longer per test.
        if self.get_option("guard_malloc"):
            return 350 * 1000
        return super(MacPort, self).default_timeout_ms()

    def supports_per_test_timeout(self):
        return True

    def _build_driver_flags(self):
        return ["ARCHS=i386"] if self.architecture() == "x86" else []

    def should_retry_crashes(self):
        # On Apple Mac, we retry crashes due to https://bugs.webkit.org/show_bug.cgi?id=82233
        return True

    def default_baseline_search_path(self):
        """Return the ordered list of baseline directories to search for expected results."""
        name = self._name.replace("-wk2", "")
        wk_version = [] if self.get_option("webkit_test_runner") else ["mac-wk1"]
        if name.endswith(self.FUTURE_VERSION):
            fallback_names = wk_version + [self.port_name]
        else:
            fallback_names = (
                self.VERSION_FALLBACK_ORDER[self.VERSION_FALLBACK_ORDER.index(name) : -1]
                + wk_version
                + [self.port_name]
            )
        # FIXME: mac-wk2 should appear at the same place as mac-wk1.
        if self.get_option("webkit_test_runner"):
            fallback_names = [self._wk2_port_name(), "wk2"] + fallback_names
        # Return a concrete list so the result can be iterated more than once
        # (map() is lazy under Python 3).
        return [self._webkit_baseline_path(fallback_name) for fallback_name in fallback_names]

    def _port_specific_expectations_files(self):
        # Least-specific expectations first, so more-specific files win on conflict.
        return list(
            reversed(
                [
                    self._filesystem.join(self._webkit_baseline_path(p), "TestExpectations")
                    for p in self.baseline_search_path()
                ]
            )
        )

    def configuration_specifier_macros(self):
        # Expansion of "os+" specifiers used in TestExpectations files.
        return {
            "elcapitan+": ["elcapitan", "future"],
            "mavericks+": ["mavericks", "yosemite", "elcapitan", "future"],
            "yosemite+": ["yosemite", "elcapitan", "future"],
        }

    def setup_environ_for_server(self, server_name=None):
        env = super(MacPort, self).setup_environ_for_server(server_name)
        if server_name == self.driver_name():
            if self.get_option("leaks"):
                env["MallocStackLogging"] = "1"
            if self.get_option("guard_malloc"):
                self._append_value_colon_separated(env, "DYLD_INSERT_LIBRARIES", "/usr/lib/libgmalloc.dylib")
            self._append_value_colon_separated(
                env, "DYLD_INSERT_LIBRARIES", self._build_path("libWebCoreTestShim.dylib")
            )
        env["XML_CATALOG_FILES"] = ""  # work around missing /etc/catalog <rdar://problem/4292995>
        return env

    def _clear_global_caches_and_temporary_files(self):
        self._filesystem.rmtree(os.path.expanduser("~/Library/" + self.driver_name()))
        self._filesystem.rmtree(os.path.expanduser("~/Library/Application Support/" + self.driver_name()))
        self._filesystem.rmtree(os.path.expanduser("~/Library/Caches/" + self.driver_name()))
        self._filesystem.rmtree(os.path.expanduser("~/Library/WebKit/" + self.driver_name()))

    def remove_cache_directory(self, name):
        # 65538 appears to be _CS_DARWIN_USER_CACHE_DIR (per-user cache dir) —
        # TODO(review): confirm against <unistd.h> / os.confstr_names.
        self._filesystem.rmtree(os.confstr(65538) + name)

    def operating_system(self):
        return "mac"

    # Belongs on a Platform object.
    def is_mavericks(self):
        return self._version == "mavericks"

    def default_child_processes(self):
        """Return the number of test workers, bounded by memory and known OS issues."""
        if self._version == "snowleopard":
            _log.warning("Cannot run tests in parallel on Snow Leopard due to rdar://problem/10621525.")
            return 1

        default_count = super(MacPort, self).default_child_processes()

        # FIXME: https://bugs.webkit.org/show_bug.cgi?id=95906  With too many WebProcess WK2 tests get stuck in resource contention.
        # To alleviate the issue reduce the number of running processes
        # Anecdotal evidence suggests that a 4 core/8 core logical machine may run into this, but that a 2 core/4 core logical machine does not.
        should_throttle_for_wk2 = self.get_option("webkit_test_runner") and default_count > 4
        # We also want to throttle for leaks bots.
        if should_throttle_for_wk2 or self.get_option("leaks"):
            default_count = int(0.75 * default_count)

        # Make sure we have enough ram to support that many instances:
        total_memory = self.host.platform.total_bytes_memory()
        if total_memory:
            bytes_per_drt = 256 * 1024 * 1024  # Assume each DRT needs 256MB to run.
            overhead = 2048 * 1024 * 1024  # Assume we need 2GB free for the O/S
            # Floor division: a process count must be an integer ('/' would
            # yield a float under Python 3).
            supportable_instances = max(
                (total_memory - overhead) // bytes_per_drt, 1
            )  # Always use one process, even if we don't have space for it.
            if supportable_instances < default_count:
                _log.warning(
                    "This machine could support %s child processes, but only has enough memory for %s."
                    % (default_count, supportable_instances)
                )
        else:
            _log.warning(
                "Cannot determine available memory for child processes, using default child process count of %s."
                % default_count
            )
            supportable_instances = default_count
        return min(supportable_instances, default_count)

    def _build_java_test_support(self):
        """Build the Java applet test fixtures; return False on build failure."""
        java_tests_path = self._filesystem.join(self.layout_tests_dir(), "java")
        build_java = [self.make_command(), "-C", java_tests_path]
        if self._executive.run_command(
            build_java, return_exit_code=True
        ):  # Paths are absolute, so we don't need to set a cwd.
            _log.error("Failed to build Java support files: %s" % build_java)
            return False
        return True

    def check_for_leaks(self, process_name, process_pid):
        if not self.get_option("leaks"):
            return
        # We could use http://code.google.com/p/psutil/ to get the process_name from the pid.
        self._leak_detector.check_for_leaks(process_name, process_pid)

    def print_leaks_summary(self):
        if not self.get_option("leaks"):
            return
        # We're in the manager process, so the leak detector will not have a valid list of leak files.
        # FIXME: This is a hack, but we don't have a better way to get this information from the workers yet.
        # FIXME: This will include too many leaks in subsequent runs until the results directory is cleared!
        leaks_files = self._leak_detector.leaks_files_in_directory(self.results_directory())
        if not leaks_files:
            return
        total_bytes_string, unique_leaks = self._leak_detector.count_total_bytes_and_unique_leaks(leaks_files)
        total_leaks = self._leak_detector.count_total_leaks(leaks_files)
        _log.info("%s total leaks found for a total of %s." % (total_leaks, total_bytes_string))
        _log.info("%s unique leaks found." % unique_leaks)

    def _check_port_build(self):
        return not self.get_option("java") or self._build_java_test_support()

    def _path_to_webcore_library(self):
        return self._build_path("WebCore.framework/Versions/A/WebCore")

    def show_results_html_file(self, results_filename):
        # We don't use self._run_script() because we don't want to wait for the script
        # to exit and we want the output to show up on stdout in case there are errors
        # launching the browser.
        # os.devnull must be opened for *writing* so it can absorb the child's
        # output (file() opened it read-only, and file() does not exist on Python 3).
        self._executive.popen(
            [self.path_to_script("run-safari")]
            + self._arguments_for_configuration()
            + ["--no-saved-state", "-NSOpen", results_filename],
            cwd=self.webkit_base(),
            stdout=open(os.devnull, "w"),
            stderr=open(os.devnull, "w"),
        )

    def sample_file_path(self, name, pid):
        return self._filesystem.join(self.results_directory(), "{0}-{1}-sample.txt".format(name, pid))

    def _get_crash_log(self, name, pid, stdout, stderr, newer_than, time_fn=None, sleep_fn=None, wait_for_log=True):
        """Return (stderr, crash_log) for the given process, polling until a log appears or a deadline passes."""
        # Note that we do slow-spin here and wait, since it appears the time
        # ReportCrash takes to actually write and flush the file varies when there are
        # lots of simultaneous crashes going on.
        # FIXME: Should most of this be moved into CrashLogs()?
        time_fn = time_fn or time.time
        sleep_fn = sleep_fn or time.sleep
        crash_log = ""
        crash_logs = CrashLogs(self.host)
        now = time_fn()
        # FIXME: delete this after we're sure this code is working ...
        _log.debug("looking for crash log for %s:%s" % (name, str(pid)))
        deadline = now + 5 * int(self.get_option("child_processes", 1))
        while not crash_log and now <= deadline:
            crash_log = crash_logs.find_newest_log(name, pid, include_errors=True, newer_than=newer_than)
            if not wait_for_log:
                break
            # Keep polling while the log is empty or contains only ERROR lines.
            if not crash_log or not [line for line in crash_log.splitlines() if not line.startswith("ERROR")]:
                sleep_fn(0.1)
                now = time_fn()

        if not crash_log:
            return (stderr, None)
        return (stderr, crash_log)

    def _merge_crash_logs(self, logs, new_logs, crashed_processes):
        """Fold crash logs found on disk into *logs*, skipping crashes already attributed to tests."""
        for test, crash_log in new_logs.iteritems():
            try:
                process_name = test.split("-")[0]
                pid = int(test.split("-")[1])
            except IndexError:
                continue
            if not any(entry[1] == process_name and entry[2] == pid for entry in crashed_processes):
                # if this is a new crash, then append the logs
                logs[test] = crash_log
        return logs

    def _look_for_all_crash_logs_in_log_dir(self, newer_than):
        crash_log = CrashLogs(self.host)
        return crash_log.find_all_logs(include_errors=True, newer_than=newer_than)

    def look_for_new_crash_logs(self, crashed_processes, start_time):
        """Since crash logs can take a long time to be written out if the system is
           under stress do a second pass at the end of the test run.

           crashes: test_name -> pid, process_name tuple of crashed process
           start_time: time the tests started at.  We're looking for crash
               logs after that time.
        """
        crash_logs = {}
        for (test_name, process_name, pid) in crashed_processes:
            # Passing None for output.  This is a second pass after the test finished so
            # if the output had any logging we would have already collected it.
            crash_log = self._get_crash_log(process_name, pid, None, None, start_time, wait_for_log=False)[1]
            if not crash_log:
                continue
            crash_logs[test_name] = crash_log
        all_crash_log = self._look_for_all_crash_logs_in_log_dir(start_time)
        return self._merge_crash_logs(crash_logs, all_crash_log, crashed_processes)

    def look_for_new_samples(self, unresponsive_processes, start_time):
        """Return a test_name -> sample-file-path map for processes that were sampled."""
        sample_files = {}
        for (test_name, process_name, pid) in unresponsive_processes:
            sample_file = self.sample_file_path(process_name, pid)
            if not self._filesystem.isfile(sample_file):
                continue
            sample_files[test_name] = sample_file
        return sample_files

    def sample_process(self, name, pid):
        """Capture a 10-second /usr/bin/sample of the given process into the results directory."""
        try:
            hang_report = self.sample_file_path(name, pid)
            self._executive.run_command(["/usr/bin/sample", pid, 10, 10, "-file", hang_report])
        except ScriptError as e:
            _log.warning("Unable to sample process:" + str(e))

    def _path_to_helper(self):
        binary_name = "LayoutTestHelper"
        return self._build_path(binary_name)

    def start_helper(self, pixel_tests=False):
        """Launch LayoutTestHelper and wait for its ready line; return True on success."""
        helper_path = self._path_to_helper()
        if not helper_path:
            _log.error("No path to LayoutTestHelper binary")
            return False
        _log.debug("Starting layout helper %s" % helper_path)
        arguments = [helper_path, "--install-color-profile"]
        self._helper = self._executive.popen(
            arguments, stdin=self._executive.PIPE, stdout=self._executive.PIPE, stderr=None
        )
        is_ready = self._helper.stdout.readline()
        if not is_ready.startswith("ready"):
            _log.error("LayoutTestHelper could not start")
            return False
        return True

    def reset_preferences(self):
        """Delete persisted driver preference domains so runs start from a clean state."""
        _log.debug("Resetting persistent preferences")

        for domain in ["DumpRenderTree", "WebKitTestRunner"]:
            try:
                self._executive.run_command(["defaults", "delete", domain])
            except ScriptError as e:
                # 'defaults' returns 1 if the domain did not exist
                if e.exit_code != 1:
                    raise e
# Esempio n. 12
# 0
class DarwinPort(ApplePort):
    """Shared behavior for Apple ports hosted on Darwin (macOS-family) systems.

    Extends ApplePort with malloc leak detection (enabled by the `leaks`
    option), crash-log discovery/merging, and helpers for viewing results.
    """

    # Subclasses override these with their OS version and SDK name.
    CURRENT_VERSION = None
    SDK = None

    def __init__(self, host, port_name, **kwargs):
        ApplePort.__init__(self, host, port_name, **kwargs)

        self._leak_detector = LeakDetector(self)
        if self.get_option("leaks"):
            # DumpRenderTree slows down noticably if we run more than about 1000 tests in a batch
            # with MallocStackLogging enabled.
            self.set_option_default("batch_size", 1000)

    def default_timeout_ms(self):
        """Return the per-test timeout; guard_malloc runs need much longer."""
        if self.get_option('guard_malloc'):
            return 350 * 1000
        return super(DarwinPort, self).default_timeout_ms()

    def _port_specific_expectations_files(self, device_type=None):
        """Return TestExpectations paths along the baseline search path.

        Reversed so the most-specific platform's expectations come last and
        therefore take precedence.
        """
        return list(
            reversed([
                self._filesystem.join(self._webkit_baseline_path(p),
                                      'TestExpectations')
                for p in self.baseline_search_path(device_type=device_type)
            ]))

    def check_for_leaks(self, process_name, process_pid):
        """Run the leak detector against one process, if leaks checking is on."""
        if not self.get_option('leaks'):
            return
        # We could use http://code.google.com/p/psutil/ to get the process_name from the pid.
        self._leak_detector.check_for_leaks(process_name, process_pid)

    def print_leaks_summary(self):
        """Log total and unique leak counts gathered from the results directory."""
        if not self.get_option('leaks'):
            return
        # We're in the manager process, so the leak detector will not have a valid list of leak files.
        # FIXME: This is a hack, but we don't have a better way to get this information from the workers yet.
        # FIXME: This will include too many leaks in subsequent runs until the results directory is cleared!
        leaks_files = self._leak_detector.leaks_files_in_directory(
            self.results_directory())
        if not leaks_files:
            return
        total_bytes_string, unique_leaks = self._leak_detector.count_total_bytes_and_unique_leaks(
            leaks_files)
        total_leaks = self._leak_detector.count_total_leaks(leaks_files)
        _log.info("%s total leaks found for a total of %s." %
                  (total_leaks, total_bytes_string))
        _log.info("%s unique leaks found." % unique_leaks)

    def _path_to_webcore_library(self):
        return self._build_path('WebCore.framework/Versions/A/WebCore')

    def show_results_html_file(self, results_filename):
        # We don't use self._run_script() because we don't want to wait for the script
        # to exit and we want the output to show up on stdout in case there are errors
        # launching the browser.
        # FIX: the child writes to these fds, so /dev/null must be opened for
        # writing; file(os.devnull) opened it read-only (writes would EBADF).
        self._executive.popen(
            [self.path_to_script('run-safari')] +
            self._arguments_for_configuration() +
            ['--no-saved-state', '-NSOpen', results_filename],
            cwd=self.webkit_base(),
            stdout=open(os.devnull, 'w'),
            stderr=open(os.devnull, 'w'))

    @memoized
    def path_to_crash_logs(self):
        """Return the user's crash-report directory, preferring DiagnosticReports."""
        log_directory = self.host.filesystem.expanduser('~')
        log_directory = self.host.filesystem.join(log_directory, 'Library',
                                                  'Logs')
        diagnositc_reports_directory = self.host.filesystem.join(
            log_directory, 'DiagnosticReports')
        if self.host.filesystem.exists(diagnositc_reports_directory):
            return diagnositc_reports_directory
        return self.host.filesystem.join(log_directory, 'CrashReporter')

    def _merge_crash_logs(self, logs, new_logs, crashed_processes):
        """Fold newly-found crash logs into `logs` and return it.

        Keys in new_logs look like "<process>-<pid>" or
        "Sandbox-<process>-<pid>"; unparseable keys are skipped.  Logs whose
        (process, pid) already appear in crashed_processes are ignored.
        """
        for test, crash_log in new_logs.iteritems():
            parts = test.split('-')
            try:
                if parts[0] == 'Sandbox':
                    process_name = parts[1]
                    pid = int(parts[2])
                else:
                    process_name = parts[0]
                    pid = int(parts[1])
            # FIX: "except IndexError, ValueError:" bound the IndexError
            # instance to the name ValueError and never caught ValueError at
            # all; catching multiple exception types requires a tuple.
            except (IndexError, ValueError):
                continue
            if not any(entry[1] == process_name and entry[2] == pid
                       for entry in crashed_processes):
                # if this is a new crash, then append the logs
                logs[test] = crash_log
        return logs
# Esempio n. 13 (stray example-separator text from scrape; not code)
 def _make_leak_detector(self):
     """Factory hook: return a LeakDetector bound to this port instance."""
     return LeakDetector(self)