Example #1
def main(host=None, args=None):
    options = parse_args(args)
    if options.no_autopep8:
        options.style = None

    if options.leave_strings_alone:
        options.quoting = None

    autopep8_options = _autopep8_options_for_style(options.style)
    fixers = ['webkitpy.formatter.fix_docstrings']
    fixers.extend(_fixers_for_quoting(options.quoting))

    if options.files == ['-']:
        host = host or SystemHost()
        host.print_(reformat_source(host.stdin.read(), autopep8_options,
                                    fixers, '<stdin>'),
                    end='')
        return

    # We create the arglist before checking if we need to create a Host, because a
    # real host is non-picklable and can't be passed to host.executive.map().

    arglist = [(host, name, autopep8_options, fixers, options.backup)
               for name in options.files]
    host = host or SystemHost()

    host.executive.map(_reformat_thunk, arglist, processes=options.jobs)
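
A minimal sketch of the worker-side thunk this pattern implies: each tuple in arglist carries host=None, and the host is constructed lazily inside the worker (exactly as reformat_file does in Example #27), so no live SystemHost crosses the process boundary. The real _reformat_thunk may differ; this only illustrates the comment above.

def _reformat_thunk(args):
    # 'args' is one tuple from arglist, pickled by host.executive.map().
    host, name, autopep8_options, fixers, should_backup_file = args
    # host is None when main() deferred construction; the worker builds
    # its own SystemHost inside reformat_file (see Example #27).
    reformat_file(host, name, autopep8_options, fixers, should_backup_file)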
Example #2
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()
Example #3
    def __init__(self):
        SystemHost.__init__(self)
        self._checkout = None

        # FIXME: Unfortunately Port objects are currently the central-dispatch objects of the NRWT world.
        # In order to instantiate a port correctly, we have to pass it at least an executive, user, scm, and filesystem
        # so for now we just pass along the whole Host object.
        # FIXME: PortFactory doesn't belong on this Host object if Port is going to have a Host (circular dependency).
        self.port_factory = PortFactory(self)

        self._engage_awesome_locale_hacks()
Example #4
    def _create_or_find_device_for_request(request, host=SystemHost(), name_base='Managed'):
        assert isinstance(request, DeviceRequest)

        device = SimulatedDeviceManager._find_exisiting_device_for_request(request)
        if device:
            return device

        name = SimulatedDeviceManager._find_available_name(name_base)
        device_type = SimulatedDeviceManager._disambiguate_device_type(request.device_type)
        runtime = SimulatedDeviceManager.get_runtime_for_device_type(device_type)
        device_identifier = SimulatedDeviceManager._get_device_identifier_for_type(device_type)

        assert runtime is not None
        assert device_identifier is not None

        for device in SimulatedDeviceManager.available_devices(host):
            if device.platform_device.name == name:
                device.platform_device._delete()
                break

        _log.debug(u"Creating device '{}', of type {}".format(name, device_type))
        host.executive.run_command([SimulatedDeviceManager.xcrun, 'simctl', 'create', name, device_identifier, runtime.identifier])

        # We just added a device, so our list of _available_devices needs to be re-synced.
        SimulatedDeviceManager.populate_available_devices(host)
        for device in SimulatedDeviceManager.available_devices(host):
            if device.platform_device.name == name:
                device.platform_device.managed_by_script = True
                return device
        return None
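
Note the host=SystemHost() default above: a default argument is evaluated once, at definition time, so every call shares a SystemHost built when the module is imported. Several other examples (#16, #20, #23) use the lazier idiom instead; a sketch of the safer equivalent here:

    def _create_or_find_device_for_request(request, host=None, name_base='Managed'):
        host = host or SystemHost()  # constructed at call time, not import time
        ...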
Example #5
    def test_virtual_test_suites(self):
        # We test that we can load the real LayoutTests/VirtualTestSuites file properly, so we
        # use a real SystemHost(). We don't care what virtual_test_suites() returns as long
        # as it is iterable.
        port = self.make_port(host=SystemHost(), port_name=self.full_port_name)
        self.assertTrue(
            isinstance(port.virtual_test_suites(), collections.Iterable))
Example #6
    def populate_available_devices(host=SystemHost()):
        if not host.platform.is_mac():
            return

        try:
            simctl_json = json.loads(
                host.executive.run_command(
                    [SimulatedDeviceManager.xcrun, 'simctl', 'list',
                     '--json']))
        except (ValueError, ScriptError):
            return

        SimulatedDeviceManager._device_identifier_to_name = {
            device['identifier']: device['name']
            for device in simctl_json['devicetypes']
        }
        SimulatedDeviceManager.AVAILABLE_RUNTIMES = SimulatedDeviceManager._create_runtimes(
            simctl_json['runtimes'])

        for runtime in SimulatedDeviceManager.AVAILABLE_RUNTIMES:
            for device_json in simctl_json['devices'][runtime.name]:
                device = SimulatedDeviceManager._create_device_with_runtime(
                    host, runtime, device_json)
                if not device:
                    continue

                # Update device state from simctl output.
                device.platform_device._state = SimulatedDevice.NAME_FOR_STATE.index(
                    device_json['state'].upper())
                device.platform_device._last_updated_state = time.time()
        return
Example #7
    def test_basic(self):
        cmd = [sys.executable, '-c', 'import sys; import time; time.sleep(0.02); print "stdout"; sys.stdout.flush(); print >>sys.stderr, "stderr"']
        host = SystemHost()
        factory = PortFactory(host)
        port = factory.get()
        now = time.time()
        proc = server_process.ServerProcess(port, 'python', cmd)
        proc.write('')

        self.assertEqual(proc.poll(), None)
        self.assertFalse(proc.has_crashed())

        # check that doing a read after an expired deadline returns
        # nothing immediately.
        line = proc.read_stdout_line(now - 1)
        self.assertEqual(line, None)

        # FIXME: This part appears to be flaky. line should always be non-None.
        # FIXME: https://bugs.webkit.org/show_bug.cgi?id=88280
        line = proc.read_stdout_line(now + 1.0)
        if line:
            self.assertEqual(line.strip(), "stdout")

        line = proc.read_stderr_line(now + 1.0)
        if line:
            self.assertEqual(line.strip(), "stderr")

        proc.stop(0)
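
The read_stdout_line/read_stderr_line calls above take an absolute deadline (a time.time() value), not a relative timeout, which is why reading with now - 1 returns None immediately. A hedged sketch of retrying a flaky read until the deadline actually expires, assuming the reads return None on timeout:

import time

deadline = time.time() + 1.0
line = None
while line is None and time.time() < deadline:
    line = proc.read_stdout_line(deadline)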
Example #8
    def swap(device,
             request,
             host=SystemHost(),
             name_base='Managed',
             timeout=SIMULATOR_BOOT_TIMEOUT):
        if SimulatedDeviceManager.INITIALIZED_DEVICES is None:
            raise RuntimeError(
                'Cannot swap when there are no initialized devices')
        if device not in SimulatedDeviceManager.INITIALIZED_DEVICES:
            raise RuntimeError(
                '{} is not initialized, cannot swap it'.format(device))

        index = SimulatedDeviceManager.INITIALIZED_DEVICES.index(device)
        SimulatedDeviceManager.INITIALIZED_DEVICES[index] = None
        device.platform_device._tear_down()

        device = SimulatedDeviceManager._create_or_find_device_for_request(
            request, host, name_base)
        assert device

        if not device.platform_device.is_booted_or_booting(force_update=True):
            device.platform_device.booted_by_script = True
            _log.debug("Booting device '{}'".format(device.udid))
            host.executive.run_command(
                [SimulatedDeviceManager.xcrun, 'simctl', 'boot', device.udid])
        SimulatedDeviceManager.INITIALIZED_DEVICES[index] = device

        deadline = time.time() + timeout
        SimulatedDeviceManager._wait_until_device_is_usable(
            device, max(0, deadline - time.time()))
Example #9
    def populate_available_devices(host=SystemHost()):
        if not host.platform.is_mac():
            return

        try:
            simctl_json = json.loads(host.executive.run_command([SimulatedDeviceManager.xcrun, 'simctl', 'list', '--json'], decode_output=False))
        except (ValueError, ScriptError):
            return

        SimulatedDeviceManager._device_identifier_to_name = {device['identifier']: device['name'] for device in simctl_json['devicetypes']}
        SimulatedDeviceManager.AVAILABLE_RUNTIMES = SimulatedDeviceManager._create_runtimes(simctl_json['runtimes'])

        for runtime in SimulatedDeviceManager.AVAILABLE_RUNTIMES:
            # Needed for <rdar://problem/47122965>
            devices = []
            if isinstance(simctl_json['devices'], list):
                for devices_for_runtime in simctl_json['devices']:
                    if devices_for_runtime['name'] == runtime.name:
                        devices = devices_for_runtime['devices']
                        break
            else:
                devices = simctl_json['devices'].get(runtime.name, None) or simctl_json['devices'].get(runtime.identifier, [])

            for device_json in devices:
                device = SimulatedDeviceManager._create_device_with_runtime(host, runtime, device_json)
                if not device:
                    continue

                # Update device state from simctl output.
                device.platform_device._state = SimulatedDevice.NAME_FOR_STATE.index(device_json['state'].upper())
                device.platform_device._last_updated_state = time.time()
        return
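
The list/dict branch above exists because 'simctl list --json' changed its 'devices' layout across Xcode releases. An illustrative sketch of the two shapes the code accepts (all field values invented):

# Older layout: 'devices' is a dict keyed by runtime name or identifier.
{'devices': {'iOS 12.1': [{'name': 'iPhone 8', 'state': 'Shutdown'}]}}

# Newer layout (the <rdar://problem/47122965> case): 'devices' is a list
# of per-runtime objects.
{'devices': [{'name': 'iOS 12.1',
              'devices': [{'name': 'iPhone 8', 'state': 'Shutdown'}]}]}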
Example #10
    def setup(cls):
        LLDB_WEBKIT_TESTER_NAME = 'lldbWebKitTester'
        BREAK_FOR_TESTING_FUNCTION_NAME = 'breakForTestingSummaryProviders'

        cls.sbDebugger = lldb.SBDebugger.Create()
        cls.sbDebugger.SetAsync(False)

        host = SystemHost()
        config = Config(host.executive, host.filesystem)
        cls.lldbWebKitTesterExecutable = os.path.join(config.build_directory(config.default_configuration()), LLDB_WEBKIT_TESTER_NAME)

        cls.sbTarget = cls.sbDebugger.CreateTarget(str(cls.lldbWebKitTesterExecutable))
        assert cls.sbTarget
        cls.sbTarget.BreakpointCreateByName(BREAK_FOR_TESTING_FUNCTION_NAME, cls.sbTarget.GetExecutable().GetFilename())

        argv = None
        envp = None
        cls.sbProcess = cls.sbTarget.LaunchSimple(argv, envp, os.getcwd())
        assert cls.sbProcess
        assert cls.sbProcess.GetState() == lldb.eStateStopped

        cls.sbThread = cls.sbProcess.GetThreadAtIndex(0)
        assert cls.sbThread

        # Frame 0 is the function with name BREAK_FOR_TESTING_FUNCTION_NAME. We want the frame of the caller of
        # BREAK_FOR_TESTING_FUNCTION_NAME because it has all the interesting local variables we want to test.
        cls.sbFrame = cls.sbThread.GetFrameAtIndex(1)
        assert cls.sbFrame
Example #11
    def __init__(self):
        SystemHost.__init__(self)
        self.web = web.Web()

        self._scm = None

        # Everything below this line is WebKit-specific and belongs on a higher-level object.
        self.buildbot = buildbot.BuildBot()

        # FIXME: Unfortunately Port objects are currently the central-dispatch objects of the NRWT world.
        # In order to instantiate a port correctly, we have to pass it at least an executive, user, scm, and filesystem
        # so for now we just pass along the whole Host object.
        # FIXME: PortFactory doesn't belong on this Host object if Port is going to have a Host (circular dependency).
        self.port_factory = PortFactory(self)

        self._engage_awesome_locale_hacks()
Example #12
    def create_crash_logs_darwin(self):
        if not SystemHost().platform.is_mac():
            return

        self.older_mock_crash_report = make_mock_crash_report_darwin(
            'DumpRenderTree', 28528)
        self.mock_crash_report = make_mock_crash_report_darwin(
            'DumpRenderTree', 28530)
        self.newer_mock_crash_report = make_mock_crash_report_darwin(
            'DumpRenderTree', 28529)
        self.other_process_mock_crash_report = make_mock_crash_report_darwin(
            'FooProcess', 28527)
        self.misformatted_mock_crash_report = 'Junk that should not appear in a crash report' + make_mock_crash_report_darwin(
            'DumpRenderTree', 28526)[200:]
        self.files = {}
        self.files[
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150718_quadzen.crash'] = self.older_mock_crash_report
        self.files[
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash'] = self.mock_crash_report
        self.files[
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150720_quadzen.crash'] = self.newer_mock_crash_report
        self.files[
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150721_quadzen.crash'] = None
        self.files[
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150722_quadzen.crash'] = self.other_process_mock_crash_report
        self.files[
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150723_quadzen.crash'] = self.misformatted_mock_crash_report
        self.filesystem = MockFileSystem(self.files)
        crash_logs = CrashLogs(MockSystemHost(filesystem=self.filesystem),
                               CrashLogsTest.DARWIN_MOCK_CRASH_DIRECTORY)
        logs = self.filesystem.files_under(
            '/Users/mock/Library/Logs/DiagnosticReports/')
        for path in reversed(sorted(logs)):
            self.assertTrue(path in self.files.keys())
        return crash_logs
Example #13
    def serial_test_process_crashing(self):
        # Give -u switch to force stdout to be unbuffered for Windows
        cmd = [
            sys.executable, '-uc',
            'import sys; print("stdout 1"); print("stdout 2"); print("stdout 3"); sys.stdin.readline(); sys.exit(1);'
        ]
        host = SystemHost()
        factory = PortFactory(host)
        port = factory.get()
        now = time.time()
        proc = server_process.ServerProcess(port, 'python', cmd)
        proc.write(b'')

        line = proc.read_stdout_line(now + 1.0)
        self.assertEqual(line.strip(), b'stdout 1')

        proc.write(b'End\n')
        time.sleep(0.1)  # Give process a moment to close.

        line = proc.read_stdout_line(now + 1.0)
        self.assertEqual(line.strip(), b'stdout 2')

        self.assertEqual(True, proc.has_crashed())

        line = proc.read_stdout_line(now + 1.0)
        self.assertEqual(line, None)

        proc.stop(0)
Example #14
    def test_find_log_darwin(self):
        if not SystemHost().platform.is_mac():
            return

        crash_logs = self.create_crash_logs_darwin()
        log = crash_logs.find_newest_log("DumpRenderTree")
        self.assertMultiLineEqual(log, self.newer_mock_crash_report)
        log = crash_logs.find_newest_log("DumpRenderTree", 28529)
        self.assertMultiLineEqual(log, self.newer_mock_crash_report)
        log = crash_logs.find_newest_log("DumpRenderTree", 28530)
        self.assertMultiLineEqual(log, self.mock_crash_report)
        log = crash_logs.find_newest_log("DumpRenderTree", 28531)
        self.assertIsNone(log)
        log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0)
        self.assertIsNone(log)

        def bad_read(path):
            raise IOError('IOError: No such file or directory')

        def bad_mtime(path):
            raise OSError('OSError: No such file or directory')

        self.filesystem.read_text_file = bad_read
        log = crash_logs.find_newest_log("DumpRenderTree", 28531, include_errors=True)
        self.assertIn('IOError: No such file or directory', log)

        self.filesystem = MockFileSystem(self.files)
        crash_logs = CrashLogs(MockSystemHost(filesystem=self.filesystem))
        self.filesystem.mtime = bad_mtime
        log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0, include_errors=True)
        self.assertIn('OSError: No such file or directory', log)
Example #15
    def test_basic(self):
        cmd = [
            sys.executable, '-c',
            'import sys; print "stdout"; sys.stdout.flush(); print >>sys.stderr, "stderr"; sys.stdin.readline();'
        ]
        host = SystemHost()
        factory = PortFactory(host)
        port = factory.get()
        now = time.time()
        proc = server_process.ServerProcess(port, 'python', cmd)
        proc.write('')

        if sys.platform.startswith('win'):
            self.assertEqual(proc.poll(), 0)
        else:
            self.assertEqual(proc.poll(), None)
        self.assertFalse(proc.has_crashed())

        # check that doing a read after an expired deadline returns
        # nothing immediately.
        line = proc.read_stdout_line(now - 1)
        self.assertEqual(line, None)

        line = proc.read_stdout_line(now + 1.0)
        self.assertEqual(line.strip(), "stdout")

        line = proc.read_stderr_line(now + 1.0)
        self.assertEqual(line.strip(), "stderr")

        proc.write('End\n')
        proc.stop(0)
Example #16
    def _boot_device(device, host=None):
        host = host or SystemHost()
        _log.debug(u"Booting device '{}'".format(device.udid))
        device.platform_device.booted_by_script = True
        host.executive.run_command(
            [SimulatedDeviceManager.xcrun, 'simctl', 'boot', device.udid])
        SimulatedDeviceManager.INITIALIZED_DEVICES.append(device)
Example #17
    def test_process_crashing(self):
        cmd = [
            sys.executable, '-c',
            'import sys; print "stdout 1"; print "stdout 2"; print "stdout 3"; sys.stdout.flush(); sys.stdin.readline(); sys.exit(1);'
        ]
        host = SystemHost()
        factory = PortFactory(host)
        port = factory.get()
        now = time.time()
        proc = server_process.ServerProcess(port, 'python', cmd)
        proc.write('')

        line = proc.read_stdout_line(now + 1.0)
        self.assertEqual(line.strip(), 'stdout 1')

        proc.write('End\n')
        time.sleep(0.1)  # Give process a moment to close.

        line = proc.read_stdout_line(now + 1.0)
        self.assertEqual(line.strip(), 'stdout 2')

        self.assertEqual(True, proc.has_crashed())

        line = proc.read_stdout_line(now + 1.0)
        self.assertEqual(line, None)

        proc.stop(0)
Example #18
    def __init__(self):
        SystemHost.__init__(self)
        self.web = web.Web()

        self._scm = None

        # Everything below this line is WebKit-specific and belongs on a higher-level object.
        self.buildbot = buildbot.BuildBot()

        # FIXME: Unfortunately Port objects are currently the central-dispatch objects of the NRWT world.
        # In order to instantiate a port correctly, we have to pass it at least an executive, user, scm, and filesystem
        # so for now we just pass along the whole Host object.
        # FIXME: PortFactory doesn't belong on this Host object if Port is going to have a Host (circular dependency).
        self.port_factory = PortFactory(self)

        self._engage_awesome_locale_hacks()
Example #19
    def max_supported_simulators(host=SystemHost()):
        if not host.platform.is_mac():
            return 0

        try:
            system_process_count_limit = int(host.executive.run_command(['/usr/bin/ulimit', '-u']).strip())
            current_process_count = len(host.executive.run_command(['/bin/ps', 'aux']).strip().split('\n'))
            _log.debug(u'Process limit: {}, current #processes: {}'.format(system_process_count_limit, current_process_count))
        except (ValueError, ScriptError):
            return 0

        max_supported_simulators_for_hardware = min(
            host.executive.cpu_count() // 2,
            host.platform.total_bytes_memory() // SimulatedDeviceManager.MEMORY_ESTIMATE_PER_SIMULATOR_INSTANCE,
            SimulatedDeviceManager.MAX_NUMBER_OF_SIMULATORS,
        )
        max_supported_simulators_locally = (system_process_count_limit - current_process_count) // SimulatedDeviceManager.PROCESS_COUNT_ESTIMATE_PER_SIMULATOR_INSTANCE

        if (max_supported_simulators_locally < max_supported_simulators_for_hardware):
            _log.warn(u'This machine could support {} simulators, but is only configured for {}.'.format(max_supported_simulators_for_hardware, max_supported_simulators_locally))
            _log.warn('Please see <https://trac.webkit.org/wiki/IncreasingKernelLimits>.')

        if max_supported_simulators_locally == 0:
            max_supported_simulators_locally = 1

        return min(max_supported_simulators_locally, max_supported_simulators_for_hardware)
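
A worked example of the capacity math above, with assumed constants (the real MEMORY_ESTIMATE_PER_SIMULATOR_INSTANCE and MAX_NUMBER_OF_SIMULATORS values may differ):

cpu_count = 12                                  # host.executive.cpu_count()
total_bytes_memory = 32 * 1024 ** 3             # 32 GB
MEMORY_ESTIMATE_PER_SIMULATOR_INSTANCE = 6 * 1024 ** 3   # assumed value
MAX_NUMBER_OF_SIMULATORS = 12                            # assumed value

min(cpu_count // 2,                                                # 6
    total_bytes_memory // MEMORY_ESTIMATE_PER_SIMULATOR_INSTANCE,  # 5
    MAX_NUMBER_OF_SIMULATORS)                                      # 12
# -> the hardware supports at most 5 simulators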
Example #20
    def tear_down(host=None, timeout=SIMULATOR_BOOT_TIMEOUT):
        host = host or SystemHost()
        if SimulatedDeviceManager._managing_simulator_app:
            host.executive.run_command(['killall', '-9', 'Simulator'], return_exit_code=True)
            SimulatedDeviceManager._managing_simulator_app = False

        if SimulatedDeviceManager.INITIALIZED_DEVICES is None:
            return

        deadline = time.time() + timeout
        while SimulatedDeviceManager.INITIALIZED_DEVICES:
            device = SimulatedDeviceManager.INITIALIZED_DEVICES[0]
            if device is None:
                SimulatedDeviceManager.INITIALIZED_DEVICES.remove(None)
                continue
            device.platform_device._tear_down(deadline - time.time())

        SimulatedDeviceManager.INITIALIZED_DEVICES = None

        if SimulatedDeviceManager._managing_simulator_app:
            for pid in host.executive.running_pids(lambda name: 'CoreSimulator.framework' in name):
                host.executive.kill_process(pid)

        # If we were managing the simulator, there are some cache files we need to remove
        for directory in host.filesystem.glob('/tmp/com.apple.CoreSimulator.SimDevice.*'):
            host.filesystem.rmtree(directory)
        core_simulator_directory = host.filesystem.expanduser(host.filesystem.join('~', 'Library', 'Developer', 'CoreSimulator'))
        host.filesystem.rmtree(host.filesystem.join(core_simulator_directory, 'Caches'))
        host.filesystem.rmtree(host.filesystem.join(core_simulator_directory, 'Temp'))
Example #21
    def serial_test_basic(self):
        # Give -u switch to force stdout and stderr to be unbuffered for Windows
        cmd = [
            sys.executable, '-uc',
            'import sys; print "stdout"; print >>sys.stderr, "stderr"; sys.stdin.readline();'
        ]
        host = SystemHost()
        factory = PortFactory(host)
        port = factory.get()
        now = time.time()
        proc = server_process.ServerProcess(port, 'python', cmd)
        proc.write('')

        self.assertEqual(proc.poll(), None)
        self.assertFalse(proc.has_crashed())

        # check that doing a read after an expired deadline returns
        # nothing immediately.
        line = proc.read_stdout_line(now - 1)
        self.assertEqual(line, None)

        line = proc.read_stdout_line(now + 1.0)
        self.assertEqual(line.strip(), "stdout")

        line = proc.read_stderr_line(now + 1.0)
        self.assertEqual(line.strip(), "stderr")

        proc.write('End\n')
        time.sleep(0.1)  # Give process a moment to close.
        self.assertEqual(proc.poll(), 0)

        proc.stop(0)
Example #22
    def test_find_log_darwin(self):
        if not SystemHost().platform.is_mac():
            return

        older_mock_crash_report = make_mock_crash_report_darwin(
            'DumpRenderTree', 28528)
        mock_crash_report = make_mock_crash_report_darwin(
            'DumpRenderTree', 28530)
        newer_mock_crash_report = make_mock_crash_report_darwin(
            'DumpRenderTree', 28529)
        other_process_mock_crash_report = make_mock_crash_report_darwin(
            'FooProcess', 28527)
        misformatted_mock_crash_report = 'Junk that should not appear in a crash report' + \
            make_mock_crash_report_darwin('DumpRenderTree', 28526)[200:]
        files = {
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150718_quadzen.crash':
            older_mock_crash_report,
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash':
            mock_crash_report,
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150720_quadzen.crash':
            newer_mock_crash_report,
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150721_quadzen.crash':
            None,
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150722_quadzen.crash':
            other_process_mock_crash_report,
            '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150723_quadzen.crash':
            misformatted_mock_crash_report,
        }
        filesystem = MockFileSystem(files)
        crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))
        log = crash_logs.find_newest_log("DumpRenderTree")
        self.assertMultiLineEqual(log, newer_mock_crash_report)
        log = crash_logs.find_newest_log("DumpRenderTree", 28529)
        self.assertMultiLineEqual(log, newer_mock_crash_report)
        log = crash_logs.find_newest_log("DumpRenderTree", 28530)
        self.assertMultiLineEqual(log, mock_crash_report)
        log = crash_logs.find_newest_log("DumpRenderTree", 28531)
        self.assertIsNone(log)
        log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0)
        self.assertIsNone(log)

        def bad_read(path):
            raise IOError('IOError: No such file or directory')

        def bad_mtime(path):
            raise OSError('OSError: No such file or directory')

        filesystem.read_text_file = bad_read
        log = crash_logs.find_newest_log("DumpRenderTree",
                                         28531,
                                         include_errors=True)
        self.assertIn('IOError: No such file or directory', log)

        filesystem = MockFileSystem(files)
        crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))
        filesystem.mtime = bad_mtime
        log = crash_logs.find_newest_log("DumpRenderTree",
                                         newer_than=1.0,
                                         include_errors=True)
        self.assertIn('OSError: No such file or directory', log)
Example #23
    def device_by_filter(filter, host=None):
        host = host or SystemHost()
        result = []
        for device in SimulatedDeviceManager.available_devices(host):
            if filter(device):
                result.append(device)
        return result
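
A usage sketch of device_by_filter, borrowing the is_booted_or_booting predicate shown in Example #8:

booted = device_by_filter(
    lambda device: device.platform_device.is_booted_or_booting(),
    host=host)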
Example #24
    def configure(self, options):
        self.options = options

        if options.timing:
            # --timing implies --verbose
            options.verbose = max(options.verbose, 1)

        log_level = logging.INFO
        if options.quiet:
            log_level = logging.WARNING
        elif options.verbose == 2:
            log_level = logging.DEBUG

        self.meter = MeteredStream(
            self.stream, (options.verbose == 2),
            number_of_columns=SystemHost().platform.terminal_width())

        handler = logging.StreamHandler(self.stream)
        # We constrain the level on the handler rather than on the root
        # logger itself.  This is probably better because the handler is
        # configured and known only to this module, whereas the root logger
        # is an object shared (and potentially modified) by many modules.
        # Modifying the handler, then, is less intrusive and less likely to
        # interfere with modifications made by other modules (e.g. in unit
        # tests).
        handler.name = __name__
        handler.setLevel(log_level)
        formatter = logging.Formatter("%(message)s")
        handler.setFormatter(formatter)

        logger = logging.getLogger()
        logger.addHandler(handler)
        logger.setLevel(logging.NOTSET)

        # Filter out most webkitpy messages.
        #
        # Messages can be selectively re-enabled for this script by updating
        # this method accordingly.
        def filter_records(record):
            """Filter out autoinstall and non-third-party webkitpy messages."""
            # FIXME: Figure out a way not to use strings here, for example by
            #        using syntax like webkitpy.test.__name__.  We want to be
            #        sure not to import any non-Python 2.4 code, though, until
            #        after the version-checking code has executed.
            if (record.name.startswith("webkitpy.common.system.autoinstall")
                    or record.name.startswith("webkitpy.test")):
                return True
            if record.name.startswith("webkitpy"):
                return False
            return True

        testing_filter = logging.Filter()
        testing_filter.filter = filter_records

        # Display a message so developers are not mystified as to why
        # logging does not work in the unit tests.
        _log.info(
            "Suppressing most webkitpy logging while running unit tests.")
        handler.addFilter(testing_filter)
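
A self-contained sketch of the point the comment above makes about constraining the handler rather than the root logger: with the root left at NOTSET every record reaches the handlers, and only this handler's own level filters them, so handlers added by other modules are untouched.

import logging

root = logging.getLogger()
root.setLevel(logging.NOTSET)        # root forwards everything

handler = logging.StreamHandler()
handler.setLevel(logging.WARNING)    # only this handler is constrained
root.addHandler(handler)

root.debug('dropped by this handler only')
root.warning('emitted')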
Example #25
def main(host=None, args=None):
    options = parse_args(args)

    if options.files == ['-']:
        host = host or SystemHost()
        host.print_(reformat_source(host.stdin.read(), options.style,
                                    '<stdin>'),
                    end='')
        return

    # We create the arglist before checking if we need to create a Host, because a
    # real host is non-picklable and can't be passed to host.executive.map().
    arglist = [(host, name, options.style, options.backup)
               for name in options.files]
    host = host or SystemHost()

    host.executive.map(_reformat_thunk, arglist, processes=options.jobs)
Example #26
    def __init__(self, file_path, handle_style_error, scm=None, host=None):
        self._file_path = file_path
        self._handle_style_error = handle_style_error
        self._host = host or SystemHost()
        self._fs = self._host.filesystem
        self._detector = scm or SCMDetector(
            self._fs, self._host.executive).detect_scm_system(
                self._fs.getcwd())
Example #27
def reformat_file(host, name, autopep8_options, fixers, should_backup_file):
    host = host or SystemHost()
    source = host.filesystem.read_text_file(name)
    dest = reformat_source(source, autopep8_options, fixers, name)
    if dest != source:
        if should_backup_file:
            host.filesystem.write_text_file(name + '.bak', source)
        host.filesystem.write_text_file(name, dest)
Example #28
    def __init__(self, tool, options, host=None, scm=None):
        self._tool = tool
        self._options = options
        self._host = host or SystemHost()
        self._fs = self._host.filesystem
        self._detector = scm or SCMDetector(
            self._fs, self._host.executive).detect_scm_system(
                self._fs.getcwd())
Example #29
    def _boot_device(device, host=None):
        host = host or SystemHost()
        _log.debug(u"Booting device '{}'".format(device.udid))
        device.platform_device.booted_by_script = True
        host.executive.run_command([SimulatedDeviceManager.xcrun, 'simctl', 'boot', device.udid])
        SimulatedDeviceManager.INITIALIZED_DEVICES.append(device)
        # FIXME: Remove this delay once rdar://77234240 is resolved.
        time.sleep(10)
Example #30
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()
Example #31
    def test_find_all_log_darwin(self):
        if not SystemHost().platform.is_mac():
            return

        crash_logs = self.create_crash_logs_darwin()
        all_logs = crash_logs.find_all_logs()
        self.assertEqual(len(all_logs), 5)

        for test, crash_log in all_logs.iteritems():
            self.assertTrue(crash_log in self.files.values())
            self.assertTrue(test == "Unknown" or int(test.split("-")[1]) in range(28527, 28531))
Example #32
    def initialize_devices(requests, host=None, name_base='Managed', simulator_ui=True, timeout=SIMULATOR_BOOT_TIMEOUT, **kwargs):
        host = host or SystemHost()
        if SimulatedDeviceManager.INITIALIZED_DEVICES is not None:
            return SimulatedDeviceManager.INITIALIZED_DEVICES

        if not host.platform.is_mac():
            return None

        SimulatedDeviceManager.INITIALIZED_DEVICES = []
        atexit.register(SimulatedDeviceManager.tear_down)

        # Convert to iterable type
        if not hasattr(requests, '__iter__'):
            requests = [requests]

        # Check running sims
        for device in SimulatedDeviceManager.available_devices(host):
            matched_request = SimulatedDeviceManager._does_fulfill_request(device, requests)
            if matched_request is None:
                continue
            requests.remove(matched_request)
            _log.debug(u'Attached to running simulator {}'.format(device))
            SimulatedDeviceManager.INITIALIZED_DEVICES.append(device)

            # DeviceRequests are compared by reference
            requests_copy = [request for request in requests]

            # Merging requests means that if 4 devices are requested, but only one is running, these
            # 4 requests will be fulfilled by the 1 running device.
            for request in requests_copy:
                if not request.merge_requests:
                    continue
                if not request.use_booted_simulator:
                    continue
                if request.device_type != device.device_type and not request.allow_incomplete_match:
                    continue
                if request.device_type.software_variant != device.device_type.software_variant:
                    continue
                requests.remove(request)

        for request in requests:
            device = SimulatedDeviceManager._create_or_find_device_for_request(request, host, name_base)
            assert device is not None

            SimulatedDeviceManager._boot_device(device, host)

        if simulator_ui and host.executive.run_command(['killall', '-0', 'Simulator'], return_exit_code=True) != 0:
            SimulatedDeviceManager._managing_simulator_app = not host.executive.run_command(['open', '-g', '-b', SimulatedDeviceManager.simulator_bundle_id, '--args', '-PasteboardAutomaticSync', '0'], return_exit_code=True)

        deadline = time.time() + timeout
        for device in SimulatedDeviceManager.INITIALIZED_DEVICES:
            SimulatedDeviceManager._wait_until_device_is_usable(device, deadline)

        return SimulatedDeviceManager.INITIALIZED_DEVICES
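
A hypothetical usage sketch of initialize_devices; the DeviceRequest and DeviceType constructors are inferred from context here, not verified against the real API:

# Hypothetical construction; see the DeviceRequest assertion in Example #4.
requests = [DeviceRequest(DeviceType.from_string('iPhone SE'))]
devices = SimulatedDeviceManager.initialize_devices(
    requests, host=SystemHost(), simulator_ui=False)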
Example #33
class RunTest(unittest.TestCase, StreamTestingMixin):
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()

    def test_basic(self):
        options, args = parse_args(tests_included=True)
        logging_stream = StringIO.StringIO()
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options)
        details = run_webkit_tests.run(port_obj, options, args, logging_stream)

        # These numbers will need to be updated whenever we add new tests.
        self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
        self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
        self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
        self.assertEqual(details.retry_results.total, test.TOTAL_RETRIES)

        one_line_summary = "%d tests ran as expected, %d didn't:\n" % (
            details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name),
            len(details.initial_results.unexpected_results_by_name))
        self.assertTrue(one_line_summary in logging_stream.buflist)

        # Ensure the results were summarized properly.
        self.assertEqual(details.summarized_results['num_regressions'], details.exit_code)

        # Ensure the image diff percentage is in the results.
        self.assertEqual(details.summarized_results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)

        # Ensure the results were written out and displayed.
        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        self.assertEqual(json.loads(json_to_eval), details.summarized_results)

        self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])


    def test_batch_size(self):
        batch_tests_run = get_test_batches(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_max_locked_shards(self):
        # Tests for the default of using one locked shard even in the case of more than one child process.
        if not self.should_test_processes:
            return
        save_env_webkit_test_max_locked_shards = None
        if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
            save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
            del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
        _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
        try:
            self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
        finally:
            if save_env_webkit_test_max_locked_shards:
                os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/passes', 'passes'],
                tests_included=True, shared_port=False)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))

    def test_dryrun(self):
        tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(tests_run, [])

        tests_run = get_tests_run(['-n'])
        self.assertEqual(tests_run, [])

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether they are in-process or out. inline exceptions work as normal,
        # which allows us to get the full stack trace and traceback from the
        # worker. The downside to this is that it could be any error, but this
        # is actually useful in testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions (a subclass of BaseException), which carry a string
        # capture of the stack that can be printed, but they don't display
        # properly in the unit test exception handlers.
        self.assertRaises(BaseException, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(BaseException, logging_run,
                ['--child-processes', '2', '--force', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)

    def test_full_results_html(self):
        # FIXME: verify html?
        details, _, _ = logging_run(['--full-results-html'])
        self.assertEqual(details.exit_code, 0)

    def test_hung_thread(self):
        details, err, _ = logging_run(['--run-singly', '--time-out-ms=50', 'failures/expected/hang.html'], tests_included=True)
        # Note that hang.html is marked as WontFix and all WontFix tests are
        # expected to Pass, so that actually running them generates an "unexpected" error.
        self.assertEqual(details.exit_code, 1)
        self.assertNotEmpty(err)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run, ['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(KeyboardInterrupt, logging_run,
                ['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True, shared_port=False)

    def test_no_tests_found(self):
        details, err, _ = logging_run(['resources'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        details, err, _ = logging_run(['foo'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_natural_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)

    def test_natural_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)

    def test_random_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(tests_run.count('passes/audio.html'), 2)
        self.assertEqual(tests_run.count('passes/args.html'), 2)

    def test_no_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_with_directory_entries_in_natural_order(self):
        tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])

    def test_gc_between_tests(self):
        self.assertTrue(passing_run(['--gc-between-tests']))

    def test_complex_text(self):
        self.assertTrue(passing_run(['--complex-text']))

    def test_threaded(self):
        self.assertTrue(passing_run(['--threaded']))

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'])
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
                          num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'])
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEqual(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])

        # Now check that we don't run anything.
        self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests should be: number_of_tests *
        # repeat_each * iterations
        host = MockHost()
        _, err, _ = logging_run(
            ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
            tests_included=True, host=host)
        self.assertContains(err, "All 16 tests ran as expected.\n")

    def test_run_chunk(self):
        # Test that we actually select the right chunk
        all_tests_run = get_tests_run(['passes', 'failures'])
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
        self.assertEqual(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
        self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.

        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_test_batches(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')

        batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_run_singly_actually_runs_tests(self):
        details, _, _ = logging_run(['--run-singly'], tests_included=True)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 1)  # failures/expected/hang.html actually passes w/ --run-singly.

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'])
        self.assertEqual(tests_run, ['passes/text.html'])

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'])
        self.assertEqual(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        tests_run = get_tests_run(['failures/expected/keybaord.html'])
        self.assertEqual([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                          'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, -1)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_missing_and_unexpected_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","image_diff_percent":1},"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_pixel_test_directories(self):
        host = MockHost()

        """Both tests have faling checksum. We include only the first in pixel tests so only that should fail."""
        args = ['--pixel-tests', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                'failures/unexpected/pixeldir/image_in_pixeldir.html',
                'failures/unexpected/image_not_in_pixeldir.html']
        details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)

        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE"'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)

    def test_missing_and_unexpected_results_with_custom_exit_code(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        class CustomExitCodePort(test.TestPort):
            def exit_code_from_summarized_results(self, unexpected_results):
                return unexpected_results['num_regressions'] + unexpected_results['num_missing']

        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
        test_port = CustomExitCodePort(host, options=options)
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host, port_obj=test_port)
        self.assertEqual(details.exit_code, 2)

    def test_crash_with_stderr(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin.
        if not self._platform.is_mac():
            return
        mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        expected_crash_log = mock_crash_report
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)

    def test_web_process_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin.
        if not self._platform.is_mac():
            return
        mock_crash_report = make_mock_crash_report_darwin('WebProcess', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
        logging_run(['failures/unexpected/web-process-crash-with-stderr.html'], tests_included=True, host=host)
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), mock_crash_report)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        details, regular_output, user = logging_run(
           ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
           tests_included=True, host=host)

        # The incremental results file no longer exists, which tells us it was
        # generated and then deleted at the end of the run.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        # This checks that we report only the number of tests that actually failed.
        self.assertEqual(details.exit_code, 1)

        # This checks that passes/text.html is considered SKIPped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail so that output is generated,
        # then check which results URL was opened.

        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
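            # abspath_to_uri() converts the absolute results path into a
            # file: URL (with platform-specific handling), which is what the
            # mock user is recorded as having opened.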
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail so that output is generated,
        # then check which results URL was opened.

        # This is the default location.
        _, _, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail so that output is generated,
        # then check which results URL was opened.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))

        # Now we test that --clobber-old-results does remove the old entries and the old retries,
        # and that we don't retry again.
        host = MockHost()
        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Clobbering old results' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))

    def test_retrying_force_pixel_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--no-pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        # Name the parsed results 'results' to avoid shadowing the json module.
        results = parse_full_results(json_string)
        self.assertEqual(results["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "image_diff_percent": 1})
        self.assertFalse(results["pixel_tests_enabled"])
        self.assertTrue(details.enabled_pixel_tests_in_retry)
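
    # For reference, a minimal sketch of what a parse_full_results()-style
    # helper can do with the JSONP-wrapped payload that full_results.json uses
    # (see test_output_diffs below). This is an illustrative sketch under that
    # assumption, not the production implementation.
    @staticmethod
    def _parse_full_results_sketch(json_string):
        # Strip the "ADD_RESULTS(" prefix and trailing ");", then parse the JSON.
        return json.loads(json_string.replace('ADD_RESULTS(', '').replace(');', ''))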

    def test_retrying_uses_retries_directory(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))

    def test_run_order__inline(self):
        # These next tests verify that we run tests in ascending alphabetical
        # order per directory. HTTP tests are sharded separately from other
        # tests, so we have to test both.
        tests_run = get_tests_run(['-i', 'passes/passes', 'passes'])
        self.assertEqual(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['http/tests/passes'])
        self.assertEqual(tests_run, sorted(tests_run))

    def test_tolerance(self):
        class ImageDiffTestPort(test.TestPort):
            def diff_image(self, expected_contents, actual_contents, tolerance=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return (True, 1, None)

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            host = MockHost()
            test_port = ImageDiffTestPort(host, options=options)
            res = passing_run(args, port_obj=test_port, tests_included=True)
            self.assertTrue(res)
            return test_port

        base_args = ['--pixel-tests', '--no-new-test-results', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_virtual(self):
        self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
                                     'virtual/passes/text.html', 'virtual/passes/args.html']))

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_skip_reftests_if_no_ref_tests(self):
        tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)
        tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'])
        self.assertEqual([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_test_results(['passes/reftest.html'])
        self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_test_results(['passes/mismatch.html'])
        self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])

    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
        host = MockHost()
        _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"unlistedtest.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
        self.assertTrue(json_string.find('"num_regressions":4') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
        self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                    tests_included=True, host=host))

    def test_no_http_and_force(self):
        # See test_run_force: using --force raises an exception.
        # FIXME: We would like to check the warnings generated.
        self.assertRaises(ValueError, logging_run, ['--force', '--no-http'])

    @staticmethod
    def has_test_of_type(tests, type):
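        # Return the filtered list; callers rely on its truthiness (non-empty
        # means at least one matching test ran).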
        return [test for test in tests if type in test]

    def test_no_http_tests(self):
        batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'websocket'))

        batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'])
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'http'))
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))

        batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'websocket'))

    def test_platform_tests_are_found(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
        self.assertTrue('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Ensure that we don't generate -wdiff.html or -pretty-diff.html if wdiff
        # and PrettyPatch aren't available.
        host = MockHost()
        _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
        self.assertEqual(full_results['has_wdiff'], False)
        self.assertEqual(full_results['has_pretty_patch'], False)

    def test_unsupported_platform(self):
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)

        self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(), '')
        self.assertTrue('unsupported platform' in stderr.getvalue())

    def test_build_check(self):
        # Using a port_name for a platform other than the one we're running on
        # means the build check should always fail.
        if sys.platform == 'darwin':
            port_name = 'gtk'
        else:
            port_name = 'mac-lion'
        out = StringIO.StringIO()
        err = StringIO.StringIO()
        self.assertEqual(run_webkit_tests.main(['--platform', port_name, 'fast/harness/results.html'], out, err), -1)

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure
        # logging in the child process (e.g., on win32), and we need to make sure
        # that works and that we still see the verbose log output. However, we
        # can't use logging_run() here, because using outputcapture to capture
        # stdout and stderr later results in a non-picklable host.

        # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
        if not self.should_test_processes:
            return

        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO.StringIO()
        run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
        self.assertTrue('text.html passed' in logging_stream.getvalue())
        self.assertTrue('image.html passed' in logging_stream.getvalue())
Example #34
0
class MainTest(unittest.TestCase, StreamTestingMixin):
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()

    def test_accelerated_video(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-video']))
        self.assertTrue(passing_run(['--no-accelerated-video']))

    def test_accelerated_2d_canvas(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-2d-canvas']))
        self.assertTrue(passing_run(['--no-accelerated-2d-canvas']))

    def test_all(self):
        res, out, err, user = logging_run([], tests_included=True)
        self.assertEquals(res, unexpected_tests_count)

    def test_basic(self):
        self.assertTrue(passing_run())

    def test_batch_size(self):
        batch_tests_run = get_tests_run(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, _, regular_output, _ = logging_run(
                ['--print', 'config', '--child-processes', '2'])
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, _, regular_output, _ = logging_run(
                ['--print', 'config', '--child-processes', '2', 'passes'],
                tests_included=True)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))

    def test_dryrun(self):
        batch_tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(batch_tests_run, [])

        batch_tests_run = get_tests_run(['-n'])
        self.assertEqual(batch_tests_run, [])
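        # ('-n' is exercised here as the short form of --dry-run; both
        # invocations should select no tests.)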

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether they are raised in-process or out-of-process. Inline exceptions
        # propagate as normal, which allows us to get the full traceback from
        # the worker. The downside is that it could be any error, but that is
        # actually useful in testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions, which carry a string capture of the stack that can
        # be printed but doesn't display properly in the unit-test exception
        # handlers.
        self.assertRaises(ValueError, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(run_webkit_tests.WorkerException, logging_run,
                ['--child-processes', '2', '--force', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True)
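
        # A minimal sketch of the re-packaging pattern described above
        # (hypothetical; the real plumbing lives in the worker/message-pool
        # code, not in this test file):
        #
        #     try:
        #         run_worker()
        #     except Exception:
        #         raise WorkerException(traceback.format_exc())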

    def test_full_results_html(self):
        # FIXME: verify html?
        res, out, err, user = logging_run(['--full-results-html'])
        self.assertEqual(res, 0)

    def test_help_printing(self):
        res, out, err, user = logging_run(['--help-printing'])
        self.assertEqual(res, 0)
        self.assertEmpty(out)
        self.assertNotEmpty(err)

    def test_hung_thread(self):
        res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
                                          'failures/expected/hang.html'],
                                          tests_included=True)
        self.assertEqual(res, 0)
        self.assertNotEmpty(out)
        self.assertNotEmpty(err)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run,
            ['failures/expected/keyboard.html', '--child-processes', '1'],
            tests_included=True)

        if self.should_test_processes:
            self.assertRaises(KeyboardInterrupt, logging_run,
                ['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True)

    def test_no_tests_found(self):
        res, out, err, user = logging_run(['resources'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertEmpty(out)
        self.assertContainsLine(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        res, out, err, user = logging_run(['foo'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertEmpty(out)
        self.assertContainsLine(err, 'No tests to run.\n')

    def test_randomize_order(self):
        # FIXME: verify order was shuffled
        self.assertTrue(passing_run(['--randomize-order']))

    def test_gc_between_tests(self):
        self.assertTrue(passing_run(['--gc-between-tests']))

    def test_complex_text(self):
        self.assertTrue(passing_run(['--complex-text']))

    def test_threaded(self):
        self.assertTrue(passing_run(['--threaded']))

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_skip_pixel_test_if_no_baseline_option(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--skip-pixel-test-if-no-baseline'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, ['passes/image.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'], flatten_batches=True, tests_included=True)
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'], tests_included=True, flatten_batches=True)
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEquals(len(get_tests_run(['--skipped=default', 'passes'], tests_included=True, flatten_batches=True)),
                          num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'], tests_included=True, flatten_batches=True)
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEquals(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEquals(get_tests_run(['--skipped=only', 'passes'], tests_included=True, flatten_batches=True),
                          ['passes/skipped/skip.html'])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests should be: number_of_tests *
        # repeat_each * iterations
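        # Here: 2 tests * 4 (--repeat-each) * 2 (--iterations) = 16 total runs;
        # the 8 passes/text.html runs pass, hence "8/16 tests passed (50.0%)".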
        host = MockHost()
        res, out, err, _ = logging_run(['--iterations', '2',
                                        '--repeat-each', '4',
                                        '--print', 'everything',
                                        'passes/text.html', 'failures/expected/text.html'],
                                       tests_included=True, host=host, record_results=True)
        self.assertContainsLine(out, "=> Results: 8/16 tests passed (50.0%)\n")
        self.assertContainsLine(err, "All 16 tests ran as expected.\n")

    def test_run_chunk(self):
        # Test that we actually select the right chunk
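        # '--run-chunk 1:4' selects chunk index 1 of chunk size 4, i.e. the
        # slice tests[4:8] of the full run.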
        all_tests_run = get_tests_run(flatten_batches=True)
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
        self.assertEquals(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.

        # See also the comments in test_exception_raised() about ValueError vs. WorkerException.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_tests_run(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'BUGX : passes/image.html = IMAGE PASS\n')

        batches = get_tests_run(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_run_singly_actually_runs_tests(self):
        res, _, _, _ = logging_run(['--run-singly', 'failures/unexpected'])
        self.assertEquals(res, 8)

    def test_single_file(self):
        # FIXME: We should consider replacing more of the get_tests_run()-style tests
        # with tests that read the tests_run* files, like this one.
        host = MockHost()
        tests_run = passing_run(['passes/text.html'], tests_included=True, host=host)
        self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/tests_run0.txt'),
                          'passes/text.html\n')

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
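        # Note: 'keybaord.html' (sic) matches no test in the mock tree, so the
        # run should select nothing. (Compare test_keyboard_interrupt above:
        # explicitly listing a real skipped test does run it.)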
        tests_run = get_tests_run(['failures/expected/keybaord.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                          'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, host=host)
        self.assertEquals(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        res, out, err, user = logging_run(['--test-list=%s' % filename],
                                          tests_included=True, host=host)
        self.assertEqual(res, -1)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, host=host)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_unexpected_failures(self):
        # Run tests including the unexpected failures.
        self._url_opened = None
        res, out, err, user = logging_run(tests_included=True)

        self.assertEqual(res, unexpected_tests_count)
        self.assertNotEmpty(out)
        self.assertNotEmpty(err)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_missing_and_unexpected_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        res, out, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host, record_results=True)
        file_list = host.filesystem.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
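        # (Removing the entry doubles as a check that tests_run0.txt was
        # written: list.remove() raises ValueError if it's absent.)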
        self.assertEquals(res, 1)
        expected_token = '"unexpected":{"text-image-checksum.html":{"expected":"PASS","actual":"TEXT"},"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_missing_and_unexpected_results_with_custom_exit_code(self):
        # Test that a port can override exit_code_from_summarized_results()
        # to compute a custom exit code from the summarized results.
        class CustomExitCodePort(TestPort):
            def exit_code_from_summarized_results(self, unexpected_results):
                return unexpected_results['num_regressions'] + unexpected_results['num_missing']

        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
        test_port = CustomExitCodePort(host, options=options)
        res, out, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host, record_results=True, port_obj=test_port)
        self.assertEquals(res, 2)

    def test_crash_with_stderr(self):
        host = MockHost()
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/crash-with-stderr.html',
            ],
            tests_included=True,
            record_results=True,
            host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/checksum-with-matching-image.html',
            ],
            tests_included=True,
            record_results=True,
            host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin.
        if not self._platform.is_mac():
            return
        mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/crash-with-stderr.html',
            ],
            tests_included=True,
            record_results=True,
            host=host)
        expected_crash_log = mock_crash_report
        self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)

    def test_web_process_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin.
        if not self._platform.is_mac():
            return
        mock_crash_report = make_mock_crash_report_darwin('WebProcess', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/web-process-crash-with-stderr.html',
            ],
            tests_included=True,
            record_results=True,
            host=host)
        self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), mock_crash_report)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/text-image-checksum.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            record_results=True,
            host=host)

        # The incremental results file no longer exists, which tells us it was
        # generated and then deleted at the end of the run.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        # This checks that we report only the number of tests that actually failed.
        self.assertEquals(res, 1)

        # This checks that passes/text.html is considered SKIPped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/text-image-checksum.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/text.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run([
                'failures/unexpected/timeout.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail so that output is generated,
        # then check which results URL was opened.

        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            res, out, err, user = logging_run(['--results-directory=' + str(tmpdir)],
                                              tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail so that output is generated,
        # then check which results URL was opened.

        # This is the default location.
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail so that output is generated,
        # then check which results URL was opened.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        res, out, err, user = logging_run(['--results-directory=foo'],
                                          tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        res, out, err, _ = logging_run(['failures/flaky'], tests_included=True, host=host)
        self.assertEquals(res, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue('Unexpected flakiness' in out.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/tests_run0.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))

        # Now we test that --clobber-old-results does remove the old entries and the old retries,
        # and that we don't retry again.
        res, out, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEquals(res, 1)
        self.assertTrue('Clobbering old results' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue('Unexpected text diff' in out.getvalue())
        self.assertFalse('Unexpected flakiness' in out.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))

    # These next tests verify that we run tests in ascending alphabetical
    # order per directory. HTTP tests are sharded separately from other
    # tests, so we have to test both.
    def assert_run_order(self, child_processes='1'):
        tests_run = get_tests_run(['--child-processes', child_processes, 'passes'],
            tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['--child-processes', child_processes, 'http/tests/passes'],
            tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, sorted(tests_run))

    def test_run_order__inline(self):
        self.assert_run_order()

    def test_tolerance(self):
        class ImageDiffTestPort(TestPort):
            def diff_image(self, expected_contents, actual_contents, tolerance=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return (True, 1)

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            host = MockHost()
            test_port = ImageDiffTestPort(host, options=options)
            res = passing_run(args, port_obj=test_port, tests_included=True)
            self.assertTrue(res)
            return test_port

        base_args = ['--pixel-tests', '--no-new-test-results', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_virtual(self):
        self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
                                     'virtual/passes/text.html', 'virtual/passes/args.html']))

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/reftest.html'], tests_run)

    def test_reftest_skip_reftests_if_no_ref_tests(self):
        tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)
        tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True, include_reference_html=True)
        self.assertEquals(['passes/reftest.html', 'passes/reftest-expected.html'], tests_run)

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_tests_run(['passes/mismatch.html'], tests_included=True, flatten_batches=True, include_reference_html=True)
        self.assertEquals(['passes/mismatch.html', 'passes/mismatch-expected-mismatch.html'], tests_run)

    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
        host = MockHost()
        res, out, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host, record_results=True)
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"unlistedtest.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
        self.assertTrue(json_string.find('"num_regressions":4') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))

        res, buildbot_output, regular_output, user = logging_run(['--additional-platform-directory', 'foo'])
        self.assertContainsLine(regular_output, '--additional-platform-directory=foo is ignored since it is not absolute\n')

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'BUGX : failures/unexpected/mismatch.html = IMAGE\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                     tests_included=True, host=host))

    def test_no_http_and_force(self):
        # See test_run_force: using --force raises an exception.
        # FIXME: We would like to check the warnings generated.
        self.assertRaises(ValueError, logging_run, ['--force', '--no-http'])

    @staticmethod
    def has_test_of_type(tests, type):
        return [test for test in tests if type in test]

    def test_no_http_tests(self):
        batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'], flatten_batches=True)
        self.assertTrue(MainTest.has_test_of_type(batch_tests_dryrun, 'http'))
        self.assertTrue(MainTest.has_test_of_type(batch_tests_dryrun, 'websocket'))

        batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'], flatten_batches=True)
        self.assertFalse(MainTest.has_test_of_type(batch_tests_run_no_http, 'http'))
        self.assertFalse(MainTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))

        batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'], flatten_batches=True)
        self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'http'))
        self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'websocket'))

    def test_platform_tests_are_found(self):
        tests_run = get_tests_run(['http'], tests_included=True, flatten_batches=True)
        self.assertTrue('platform/test-snow-leopard/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Ensure that we don't generate -wdiff.html or -pretty-diff.html if wdiff
        # and PrettyPatch aren't available.
        host = MockHost()
        res, out, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'],
                                       tests_included=True, record_results=True, host=host)
        written_files = host.filesystem.written_files
        self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
        self.assertEquals(full_results['has_wdiff'], False)
        self.assertEquals(full_results['has_pretty_patch'], False)