Example #1
    def run_mochitest_test(self,
                           suite=None,
                           test_file=None,
                           debugger=None,
                           shuffle=False,
                           keep_open=False,
                           rerun_failures=False,
                           no_autorun=False,
                           repeat=0,
                           slow=False):
        """Runs a mochitest.

        test_file is a path to a test file. It can be a relative path from the
        top source directory, an absolute filename, or a directory containing
        test files.

        suite is the type of mochitest to run. It can be one of ('plain',
        'chrome', 'browser', 'metro', 'a11y').

        debugger is a program name or path to a binary (presumably a debugger)
        to run the test in, e.g. 'gdb'.

        shuffle is whether test order should be shuffled (defaults to false).

        keep_open denotes whether to keep the browser open after tests
        complete.
        """
        # TODO Bug 794506 remove once mach integrates with virtualenv.
        build_path = os.path.join(self.topobjdir, 'build')
        if build_path not in sys.path:
            sys.path.append(build_path)

        if rerun_failures and test_file:
            print('Cannot specify both --rerun-failures and a test path.')
            return 1

        # Need to call relpath before os.chdir() below.
        test_path = ''
        if test_file:
            test_path = self._wrap_path_argument(test_file).relpath()

        tests_dir = os.path.join(self.topobjdir, '_tests')
        mochitest_dir = os.path.join(tests_dir, 'testing', 'mochitest')

        failure_file_path = os.path.join(self.statedir,
                                         'mochitest_failures.json')

        if rerun_failures and not os.path.exists(failure_file_path):
            print('No failure file present. Did you run mochitests before?')
            return 1

        from StringIO import StringIO
        from automation import Automation

        # runtests.py is ambiguous, so we load the file/module manually.
        if 'mochitest' not in sys.modules:
            import imp
            path = os.path.join(mochitest_dir, 'runtests.py')
            with open(path, 'r') as fh:
                imp.load_module('mochitest', fh, path,
                                ('.py', 'r', imp.PY_SOURCE))

        import mochitest

        # This is required to make other components happy. Sad, isn't it?
        os.chdir(self.topobjdir)

        automation = Automation()

        # Automation installs its own stream handler to stdout. Since we want
        # all logging to go through us, we just remove their handler.
        remove_handlers = [
            l for l in logging.getLogger().handlers
            if isinstance(l, logging.StreamHandler)
        ]
        for handler in remove_handlers:
            logging.getLogger().removeHandler(handler)

        runner = mochitest.Mochitest(automation)

        opts = mochitest.MochitestOptions(automation, tests_dir)
        options, args = opts.parse_args([])

        # Need to set the suite options before verifyOptions below.
        if suite == 'plain':
            # Don't need additional options for plain.
            pass
        elif suite == 'chrome':
            options.chrome = True
        elif suite == 'browser':
            options.browserChrome = True
        elif suite == 'metro':
            options.immersiveMode = True
            options.browserChrome = True
        elif suite == 'a11y':
            options.a11y = True
        else:
            raise Exception('None or unrecognized mochitest suite type.')

        options = opts.verifyOptions(options, runner)

        if options is None:
            raise Exception('mochitest option validator failed.')

        options.autorun = not no_autorun
        options.closeWhenDone = not keep_open
        options.shuffle = shuffle
        options.consoleLevel = 'INFO'
        options.repeat = repeat
        options.runSlower = slow
        options.testingModulesDir = os.path.join(tests_dir, 'modules')
        options.extraProfileFiles.append(os.path.join(self.distdir, 'plugins'))
        options.symbolsPath = os.path.join(self.distdir,
                                           'crashreporter-symbols')

        options.failureFile = failure_file_path

        automation.setServerInfo(options.webServer, options.httpPort,
                                 options.sslPort, options.webSocketPort)

        if test_path:
            test_root = runner.getTestRoot(options)
            test_root_file = mozpack.path.join(mochitest_dir, test_root,
                                               test_path)
            if not os.path.exists(test_root_file):
                print('Specified test path does not exist: %s' %
                      test_root_file)
                print(
                    'You may need to run |mach build| to build the test files.'
                )
                return 1

            options.testPath = test_path
            env = {'TEST_PATH': test_path}

        if rerun_failures:
            options.testManifest = failure_file_path

        if debugger:
            options.debugger = debugger

        # We need this to enable colorization of output.
        self.log_manager.enable_unstructured()

        # Output processing is a little funky here. The old make targets
        # grepped the log output from TEST-UNEXPECTED-* and printed these lines
        # after test execution. Ideally the test runner would expose a Python
        # API for obtaining test results and we could just format failures
        # appropriately. Unfortunately, it doesn't yet do that. So, we capture
        # all output to a buffer then "grep" the buffer after test execution.
        # Bug 858197 tracks a Python API that would facilitate this.
        test_output = StringIO()
        handler = logging.StreamHandler(test_output)
        handler.addFilter(UnexpectedFilter())
        handler.setFormatter(StructuredHumanFormatter(0, write_times=False))
        logging.getLogger().addHandler(handler)

        result = runner.runTests(options)

        # Need to remove our buffering handler before we echo failures or else
        # it will catch them again!
        logging.getLogger().removeHandler(handler)
        self.log_manager.disable_unstructured()

        if test_output.getvalue():
            for line in test_output.getvalue().splitlines():
                self.log(logging.INFO, 'unexpected', {'msg': line}, '{msg}')

        return result
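
Two patterns in this example recur in the others and are worth isolating. The first is the manual module load near the top: runtests.py is an ambiguous file name, so the harness loads it under the explicit module name 'mochitest'. Below is a minimal, runnable sketch of that pattern, using a throwaway file in place of the real runtests.py (imp is the legacy Python 2 API used here; importlib supersedes it in modern Python):

import imp
import os
import sys
import tempfile

# Write a throwaway stand-in for runtests.py (illustrative only).
path = os.path.join(tempfile.mkdtemp(), 'runtests.py')
with open(path, 'w') as fh:
    fh.write('def hello():\n    return "loaded as mochitest"\n')

# Load the file under the unambiguous module name 'mochitest', exactly
# as the example does; this registers it in sys.modules.
if 'mochitest' not in sys.modules:
    with open(path, 'r') as fh:
        imp.load_module('mochitest', fh, path, ('.py', 'r', imp.PY_SOURCE))

import mochitest
print(mochitest.hello())  # -> loaded as mochitest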
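The second pattern is the buffer-and-grep at the end: a logging.StreamHandler pointed at an in-memory buffer, with a filter that only passes unexpected-result lines, captures failures during the run so they can be echoed afterwards. A self-contained sketch, assuming UnexpectedFilter simply matches TEST-UNEXPECTED markers (the real filter and StructuredHumanFormatter are defined elsewhere in the harness):

import logging
from io import StringIO  # the Python 2 original uses StringIO.StringIO

class UnexpectedFilter(logging.Filter):
    # Hypothetical stand-in: pass only records that look like unexpected
    # test results, as the harness's own UnexpectedFilter is assumed to do.
    def filter(self, record):
        return 'TEST-UNEXPECTED' in record.getMessage()

root = logging.getLogger()
root.setLevel(logging.INFO)

buffered = StringIO()
handler = logging.StreamHandler(buffered)
handler.addFilter(UnexpectedFilter())
root.addHandler(handler)

root.info('TEST-PASS | test_ok.html | took 12ms')             # filtered out
root.info('TEST-UNEXPECTED-FAIL | test_bad.html | timed out')  # captured

# Detach the buffering handler before echoing, or it would capture the
# echoed lines again -- the same reason the example removes it above.
root.removeHandler(handler)
for line in buffered.getvalue().splitlines():
    print('unexpected: %s' % line)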
Example #2
    def run_desktop_test(self,
                         context,
                         suite=None,
                         test_paths=None,
                         debugger=None,
                         debugger_args=None,
                         slowscript=False,
                         shuffle=False,
                         keep_open=False,
                         rerun_failures=False,
                         no_autorun=False,
                         repeat=0,
                         run_until_failure=False,
                         slow=False,
                         chunk_by_dir=0,
                         total_chunks=None,
                         this_chunk=None,
                         jsdebugger=False,
                         debug_on_failure=False,
                         start_at=None,
                         end_at=None,
                         e10s=False,
                         dmd=False,
                         dump_output_directory=None,
                         dump_about_memory_after_test=False,
                         dump_dmd_after_test=False,
                         install_extension=None,
                         quiet=False,
                         **kwargs):
        """Runs a mochitest.

        test_paths are paths to tests. Each can be a relative path from the
        top source directory, an absolute filename, or a directory containing
        test files.

        suite is the type of mochitest to run. It can be one of ('plain',
        'chrome', 'browser', 'metro', 'a11y', 'webapprt-content',
        'webapprt-chrome').

        debugger is a program name or path to a binary (presumably a debugger)
        to run the test in, e.g. 'gdb'.

        debugger_args are the arguments passed to the debugger.

        slowscript is true if the user has requested the SIGSEGV mechanism of
        invoking the slow script dialog.

        shuffle is whether test order should be shuffled (defaults to false).

        keep_open denotes whether to keep the browser open after tests
        complete.
        """
        if rerun_failures and test_paths:
            print('Cannot specify both --rerun-failures and a test path.')
            return 1

        # Need to call relpath before os.chdir() below.
        if test_paths:
            test_paths = [
                self._wrap_path_argument(p).relpath() for p in test_paths
            ]

        failure_file_path = os.path.join(self.statedir,
                                         'mochitest_failures.json')

        if rerun_failures and not os.path.exists(failure_file_path):
            print('No failure file present. Did you run mochitests before?')
            return 1

        from StringIO import StringIO

        # runtests.py is ambiguous, so we load the file/module manually.
        if 'mochitest' not in sys.modules:
            import imp
            path = os.path.join(self.mochitest_dir, 'runtests.py')
            with open(path, 'r') as fh:
                imp.load_module('mochitest', fh, path,
                                ('.py', 'r', imp.PY_SOURCE))

        import mozinfo
        import mochitest
        from manifestparser import TestManifest
        from mozbuild.testing import TestResolver

        # This is required to make other components happy. Sad, isn't it?
        os.chdir(self.topobjdir)

        # Automation installs its own stream handler to stdout. Since we want
        # all logging to go through us, we just remove their handler.
        remove_handlers = [
            l for l in logging.getLogger().handlers
            if isinstance(l, logging.StreamHandler)
        ]
        for handler in remove_handlers:
            logging.getLogger().removeHandler(handler)

        runner = mochitest.Mochitest()

        opts = mochitest.MochitestOptions()
        options, args = opts.parse_args([])

        flavor = suite

        # Need to set the suite options before verifyOptions below.
        if suite == 'plain':
            # Don't need additional options for plain.
            flavor = 'mochitest'
        elif suite == 'chrome':
            options.chrome = True
        elif suite == 'browser':
            options.browserChrome = True
            flavor = 'browser-chrome'
        elif suite == 'metro':
            options.immersiveMode = True
            options.browserChrome = True
        elif suite == 'a11y':
            options.a11y = True
        elif suite == 'webapprt-content':
            options.webapprtContent = True
            options.app = self.get_webapp_runtime_path()
        elif suite == 'webapprt-chrome':
            options.webapprtChrome = True
            options.app = self.get_webapp_runtime_path()
            options.browserArgs.append("-test-mode")
        else:
            raise Exception('None or unrecognized mochitest suite type.')

        if dmd:
            options.dmdPath = self.bin_dir

        options.autorun = not no_autorun
        options.closeWhenDone = not keep_open
        options.slowscript = slowscript
        options.shuffle = shuffle
        options.consoleLevel = 'INFO'
        options.repeat = repeat
        options.runUntilFailure = run_until_failure
        options.runSlower = slow
        options.testingModulesDir = os.path.join(self.tests_dir, 'modules')
        options.extraProfileFiles.append(os.path.join(self.distdir, 'plugins'))
        options.symbolsPath = os.path.join(self.distdir,
                                           'crashreporter-symbols')
        options.chunkByDir = chunk_by_dir
        options.totalChunks = total_chunks
        options.thisChunk = this_chunk
        options.jsdebugger = jsdebugger
        options.debugOnFailure = debug_on_failure
        options.startAt = start_at
        options.endAt = end_at
        options.e10s = e10s
        options.dumpAboutMemoryAfterTest = dump_about_memory_after_test
        options.dumpDMDAfterTest = dump_dmd_after_test
        options.dumpOutputDirectory = dump_output_directory
        options.quiet = quiet

        options.failureFile = failure_file_path
        if install_extension is not None:
            options.extensionsToInstall = [
                os.path.join(self.topsrcdir, install_extension)
            ]

        for k, v in kwargs.iteritems():
            setattr(options, k, v)

        if test_paths:
            resolver = self._spawn(TestResolver)

            tests = list(
                resolver.resolve_tests(paths=test_paths,
                                       flavor=flavor,
                                       cwd=context.cwd))

            if not tests:
                print('No tests could be found in the path specified. Please '
                      'specify a path that is a test file or is a directory '
                      'containing tests.')
                return 1

            manifest = TestManifest()
            manifest.tests.extend(tests)

            options.manifestFile = manifest

        if rerun_failures:
            options.testManifest = failure_file_path

        if debugger:
            options.debugger = debugger

        if debugger_args:
            if options.debugger is None:
                print("--debugger-args passed, but no debugger specified.")
                return 1
            options.debuggerArgs = debugger_args

        options = opts.verifyOptions(options, runner)

        if options is None:
            raise Exception('mochitest option validator failed.')

        # We need this to enable colorization of output.
        self.log_manager.enable_unstructured()

        # Output processing is a little funky here. The old make targets
        # grepped the log output from TEST-UNEXPECTED-* and printed these lines
        # after test execution. Ideally the test runner would expose a Python
        # API for obtaining test results and we could just format failures
        # appropriately. Unfortunately, it doesn't yet do that. So, we capture
        # all output to a buffer then "grep" the buffer after test execution.
        # Bug 858197 tracks a Python API that would facilitate this.
        test_output = StringIO()
        handler = logging.StreamHandler(test_output)
        handler.addFilter(UnexpectedFilter())
        handler.setFormatter(StructuredHumanFormatter(0, write_times=False))
        logging.getLogger().addHandler(handler)

        result = runner.runTests(options)

        # Need to remove our buffering handler before we echo failures or else
        # it will catch them again!
        logging.getLogger().removeHandler(handler)
        self.log_manager.disable_unstructured()

        if test_output.getvalue():
            result = 1
            for line in test_output.getvalue().splitlines():
                self.log(logging.INFO, 'unexpected', {'msg': line}, '{msg}')

        return result
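
Example #2 adds a generic passthrough over Example #1: any keyword arguments not handled explicitly are copied verbatim onto the parsed options object, so new harness flags need no dedicated plumbing. A small sketch of that idiom with illustrative names (written with .items(); the original's .iteritems() is Python 2):

class Options(object):
    # Minimal stand-in for the parsed optparse options namespace.
    pass

def run_desktop_test(repeat=0, **kwargs):
    options = Options()
    options.repeat = repeat
    # Copy any unrecognized keyword arguments verbatim onto options,
    # exactly as the example does with kwargs.iteritems().
    for k, v in kwargs.items():
        setattr(options, k, v)
    return options

options = run_desktop_test(repeat=3, shuffle=True, quiet=False)
print(options.repeat, options.shuffle, options.quiet)  # -> 3 True False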
Example #3
    def run_desktop_test(
            self,
            context,
            suite=None,
            test_paths=None,
            debugger=None,
            debugger_args=None,
            slowscript=False,
            screenshot_on_fail=False,
            shuffle=False,
            closure_behaviour='auto',
            rerun_failures=False,
            no_autorun=False,
            repeat=0,
            run_until_failure=False,
            slow=False,
            chunk_by_dir=0,
            total_chunks=None,
            this_chunk=None,
            extraPrefs=[],
            jsdebugger=False,
            debug_on_failure=False,
            start_at=None,
            end_at=None,
            e10s=False,
            strict_content_sandbox=False,
            nested_oop=False,
            dmd=False,
            dump_output_directory=None,
            dump_about_memory_after_test=False,
            dump_dmd_after_test=False,
            install_extension=None,
            quiet=False,
            environment=[],
            app_override=None,
            bisectChunk=None,
            runByDir=False,
            useTestMediaDevices=False,
            timeout=None,
            max_timeouts=None,
            **kwargs):
        """Runs a mochitest.

        test_paths are paths to tests. Each can be a relative path from the
        top source directory, an absolute filename, or a directory containing
        test files.

        suite is the type of mochitest to run. It can be one of ('plain',
        'chrome', 'browser', 'devtools', 'metro', 'a11y', 'jetpack-package',
        'jetpack-addon', 'webapprt-content', 'webapprt-chrome').

        debugger is a program name or path to a binary (presumably a debugger)
        to run the test in, e.g. 'gdb'.

        debugger_args are the arguments passed to the debugger.

        slowscript is true if the user has requested the SIGSEGV mechanism of
        invoking the slow script dialog.

        shuffle is whether test order should be shuffled (defaults to false).

        closure_behaviour denotes whether to keep the browser open after tests
        complete: 'open' always keeps it open, while 'auto' keeps it open
        only when a single plain test was run.
        """
        if rerun_failures and test_paths:
            print('Cannot specify both --rerun-failures and a test path.')
            return 1

        # Make absolute paths relative before calling os.chdir() below.
        if test_paths:
            test_paths = [
                self._wrap_path_argument(p).relpath() if os.path.isabs(p)
                else p for p in test_paths
            ]

        failure_file_path = os.path.join(
            self.statedir,
            'mochitest_failures.json')

        if rerun_failures and not os.path.exists(failure_file_path):
            print('No failure file present. Did you run mochitests before?')
            return 1

        # runtests.py is ambiguous, so we load the file/module manually.
        if 'mochitest' not in sys.modules:
            import imp
            path = os.path.join(self.mochitest_dir, 'runtests.py')
            with open(path, 'r') as fh:
                imp.load_module('mochitest', fh, path,
                                ('.py', 'r', imp.PY_SOURCE))

        import mochitest
        from manifestparser import TestManifest
        from mozbuild.testing import TestResolver

        # This is required to make other components happy. Sad, isn't it?
        os.chdir(self.topobjdir)

        # Automation installs its own stream handler to stdout. Since we want
        # all logging to go through us, we just remove their handler.
        remove_handlers = [l for l in logging.getLogger().handlers
                           if isinstance(l, logging.StreamHandler)]
        for handler in remove_handlers:
            logging.getLogger().removeHandler(handler)

        opts = mochitest.MochitestOptions()
        options, args = opts.parse_args([])

        options.subsuite = ''
        flavor = suite

        # Need to set the suite options before verifyOptions below.
        if suite == 'plain':
            # Don't need additional options for plain.
            flavor = 'mochitest'
        elif suite == 'chrome':
            options.chrome = True
        elif suite == 'browser':
            options.browserChrome = True
            flavor = 'browser-chrome'
        elif suite == 'devtools':
            options.browserChrome = True
            options.subsuite = 'devtools'
        elif suite == 'jetpack-package':
            options.jetpackPackage = True
        elif suite == 'jetpack-addon':
            options.jetpackAddon = True
        elif suite == 'metro':
            options.immersiveMode = True
            options.browserChrome = True
        elif suite == 'a11y':
            options.a11y = True
        elif suite == 'webapprt-content':
            options.webapprtContent = True
            options.app = self.get_webapp_runtime_path()
        elif suite == 'webapprt-chrome':
            options.webapprtChrome = True
            options.app = self.get_webapp_runtime_path()
            options.browserArgs.append("-test-mode")
        else:
            raise Exception('None or unrecognized mochitest suite type.')

        if dmd:
            options.dmdPath = self.bin_dir

        options.autorun = not no_autorun
        options.closeWhenDone = closure_behaviour != 'open'
        options.slowscript = slowscript
        options.screenshotOnFail = screenshot_on_fail
        options.shuffle = shuffle
        options.consoleLevel = 'INFO'
        options.repeat = repeat
        options.runUntilFailure = run_until_failure
        options.runSlower = slow
        options.testingModulesDir = os.path.join(self.tests_dir, 'modules')
        options.extraProfileFiles.append(os.path.join(self.distdir, 'plugins'))
        options.symbolsPath = os.path.join(
            self.distdir,
            'crashreporter-symbols')
        options.chunkByDir = chunk_by_dir
        options.totalChunks = total_chunks
        options.thisChunk = this_chunk
        options.jsdebugger = jsdebugger
        options.debugOnFailure = debug_on_failure
        options.startAt = start_at
        options.endAt = end_at
        options.e10s = e10s
        options.strictContentSandbox = strict_content_sandbox
        options.nested_oop = nested_oop
        options.dumpAboutMemoryAfterTest = dump_about_memory_after_test
        options.dumpDMDAfterTest = dump_dmd_after_test
        options.dumpOutputDirectory = dump_output_directory
        options.quiet = quiet
        options.environment = environment
        options.extraPrefs = extraPrefs
        options.bisectChunk = bisectChunk
        options.runByDir = runByDir
        options.useTestMediaDevices = useTestMediaDevices
        if timeout:
            options.timeout = int(timeout)
        if max_timeouts:
            options.maxTimeouts = int(max_timeouts)

        options.failureFile = failure_file_path
        if install_extension is not None:
            options.extensionsToInstall = [
                os.path.join(self.topsrcdir, install_extension)
            ]

        for k, v in kwargs.iteritems():
            setattr(options, k, v)

        if test_paths:
            resolver = self._spawn(TestResolver)

            tests = list(
                resolver.resolve_tests(
                    paths=test_paths,
                    flavor=flavor))

            if not tests:
                print('No tests could be found in the path specified. Please '
                      'specify a path that is a test file or is a directory '
                      'containing tests.')
                return 1

            manifest = TestManifest()
            manifest.tests.extend(tests)

            if (len(tests) == 1 and closure_behaviour == 'auto' and
                    suite == 'plain'):
                options.closeWhenDone = False

            options.manifestFile = manifest

        if rerun_failures:
            options.testManifest = failure_file_path

        if debugger:
            options.debugger = debugger

        if debugger_args:
            if options.debugger is None:
                print("--debugger-args passed, but no debugger specified.")
                return 1
            options.debuggerArgs = debugger_args

        if app_override:
            if app_override == "dist":
                options.app = self.get_binary_path(where='staged-package')
            else:
                options.app = app_override
            if options.gmp_path is None:
                # Need to fix the location of gmp_fake which might not be
                # shipped in the binary
                bin_path = self.get_binary_path()
                options.gmp_path = os.path.join(
                    os.path.dirname(bin_path),
                    'gmp-fake',
                    '1.0')
                options.gmp_path += os.pathsep
                options.gmp_path += os.path.join(
                    os.path.dirname(bin_path),
                    'gmp-clearkey',
                    '0.1')

        logger_options = {
            key: value for key, value in vars(options).iteritems()
            if key.startswith('log')
        }
        runner = mochitest.Mochitest(logger_options)
        options = opts.verifyOptions(options, runner)

        if options is None:
            raise Exception('mochitest option validator failed.')

        # We need this to enable colorization of output.
        self.log_manager.enable_unstructured()

        result = runner.runTests(options)

        self.log_manager.disable_unstructured()
        if runner.message_logger.errors:
            result = 1
            runner.message_logger.logger.warning("The following tests failed:")
            for error in runner.message_logger.errors:
                runner.message_logger.logger.log_raw(error)

        runner.message_logger.finish()

        return result
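
Example #3 constructs the runner differently from the first two: logging-related settings are split out of the options namespace by key prefix and handed to mochitest.Mochitest separately, rather than letting the runner install its own handlers. A sketch of that prefix filter, with made-up option names (the original uses .iteritems(), the Python 2 spelling of .items()):

class Namespace(object):
    # Stand-in for the parsed options object.
    def __init__(self, **kw):
        self.__dict__.update(kw)

# Hypothetical option names; only the 'log' prefix matters to the filter.
options = Namespace(log_tbpl='-', log_raw=None, repeat=0, shuffle=False)

logger_options = {key: value for key, value in vars(options).items()
                  if key.startswith('log')}
print(logger_options)  # -> {'log_tbpl': '-', 'log_raw': None}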