Example #1
0
class MachCommands(MachCommandBase):
    """Mach commands that wrap the web-platform-tests harness for Firefox."""

    @Command("web-platform-tests",
             category="testing",
             conditions=[conditions.is_firefox],
             parser=wptcommandline.create_parser(["firefox"]))
    def run_web_platform_tests(self, **params):
        """Either list the available test groups or run the wpt suite,
        depending on the ``list_test_groups`` flag parsed from the CLI."""
        self.setup()
        runner = self._spawn(WebPlatformTestsRunner)

        action = (runner.list_test_groups
                  if params["list_test_groups"]
                  else runner.run_tests)
        return action(**params)

    @Command("web-platform-tests-update",
             category="testing",
             parser=updatecommandline.create_parser())
    def update_web_platform_tests(self, **params):
        """Run the web-platform-tests updater."""
        self.setup()
        # The updater needs these extra packages in the virtualenv.
        for requirement in ('html5lib==0.99', 'requests'):
            self.virtualenv_manager.install_pip_package(requirement)
        updater = self._spawn(WebPlatformTestsUpdater)
        return updater.run_update(**params)

    def setup(self):
        # Every wpt command runs inside the in-tree virtualenv.
        self._activate_virtualenv()

    @Command("web-platform-tests-reduce",
             category="testing",
             conditions=[conditions.is_firefox],
             parser=wptcommandline.create_parser_reduce(["firefox"]))
    def unstable_web_platform_tests(self, **params):
        """Run the wpt reduction tool (used to narrow down unstable tests)."""
        self.setup()
        reducer = self._spawn(WebPlatformTestsReduce)
        return reducer.run_reduce(**params)
Example #2
0
class MachCommands(CommandBase):
    DEFAULT_RENDER_MODE = "cpu"
    HELP_RENDER_MODE = "Value can be 'cpu', 'gpu' or 'both' (default " + DEFAULT_RENDER_MODE + ")"

    def __init__(self, context):
        CommandBase.__init__(self, context)
        if not hasattr(self.context, "built_tests"):
            self.context.built_tests = False

    @Command('test',
             description='Run specified Servo tests',
             category='testing')
    @CommandArgument('params',
                     default=None,
                     nargs="...",
                     help="Optionally select test based on "
                     "test file directory")
    @CommandArgument('--render-mode',
                     '-rm',
                     default=DEFAULT_RENDER_MODE,
                     help="The render mode to be used on all tests. " +
                     HELP_RENDER_MODE)
    @CommandArgument('--release',
                     default=False,
                     action="store_true",
                     help="Run with a release build of servo")
    @CommandArgument('--tidy-all',
                     default=False,
                     action="store_true",
                     help="Check all files, and run the WPT lint in tidy, "
                     "even if unchanged")
    @CommandArgument('--no-progress',
                     default=False,
                     action="store_true",
                     help="Don't show progress for tidy")
    @CommandArgument('--self-test',
                     default=False,
                     action="store_true",
                     help="Run unit tests for tidy")
    @CommandArgument('--all',
                     default=False,
                     action="store_true",
                     dest="all_suites",
                     help="Run all test suites")
    def test(self,
             params,
             render_mode=DEFAULT_RENDER_MODE,
             release=False,
             tidy_all=False,
             no_progress=False,
             self_test=False,
             all_suites=False):
        suites = copy.deepcopy(TEST_SUITES)
        suites["tidy"]["kwargs"] = {
            "all_files": tidy_all,
            "no_progress": no_progress,
            "self_test": self_test
        }
        suites["wpt"]["kwargs"] = {"release": release}
        suites["css"]["kwargs"] = {"release": release}
        suites["unit"]["kwargs"] = {}
        suites["compiletest"]["kwargs"] = {"release": release}

        selected_suites = OrderedDict()

        if params is None:
            if all_suites:
                params = suites.keys()
            else:
                print(
                    "Specify a test path or suite name, or pass --all to run all test suites.\n\nAvailable suites:"
                )
                for s in suites:
                    print("    %s" % s)
                return 1

        for arg in params:
            found = False
            if arg in suites and arg not in selected_suites:
                selected_suites[arg] = []
                found = True
            else:
                suite = self.suite_for_path(arg)
                if suite is not None:
                    if suite not in selected_suites:
                        selected_suites[suite] = []
                    selected_suites[suite].append(arg)
                    found = True
                    break

            if not found:
                print("%s is not a valid test path or suite name" % arg)
                return 1

        test_start = time()
        for suite, tests in selected_suites.iteritems():
            props = suites[suite]
            kwargs = props.get("kwargs", {})
            if tests:
                kwargs[props["include_arg"]] = tests

            Registrar.dispatch("test-%s" % suite,
                               context=self.context,
                               **kwargs)

        elapsed = time() - test_start

        print("Tests completed in %0.2fs" % elapsed)

    # Helper to determine which test suite owns the path
    def suite_for_path(self, path_arg):
        if os.path.exists(path.abspath(path_arg)):
            abs_path = path.abspath(path_arg)
            for prefix, suite in TEST_SUITES_BY_PREFIX.iteritems():
                if abs_path.startswith(prefix):
                    return suite
        return None

    @Command('test-geckolib',
             description='Test geckolib sanity checks',
             category='testing')
    def test_geckolib(self):
        self.ensure_bootstrapped()

        env = self.build_env()
        env["RUST_BACKTRACE"] = "1"

        return call(["cargo", "test"],
                    env=env,
                    cwd=path.join("ports", "geckolib"))

    @Command('test-unit', description='Run unit tests', category='testing')
    @CommandArgument('--package',
                     '-p',
                     default=None,
                     help="Specific package to test")
    @CommandArgument('test_name',
                     nargs=argparse.REMAINDER,
                     help="Only run tests that match this pattern or file path"
                     )
    def test_unit(self, test_name=None, package=None):
        if test_name is None:
            test_name = []

        self.ensure_bootstrapped()

        if package:
            packages = {package}
        else:
            packages = set()

        test_patterns = []
        for test in test_name:
            # add package if 'tests/unit/<package>'
            match = re.search("tests/unit/(\\w+)/?$", test)
            if match:
                packages.add(match.group(1))
            # add package & test if '<package>/<test>', 'tests/unit/<package>/<test>.rs', or similar
            elif re.search("\\w/\\w", test):
                tokens = test.split("/")
                packages.add(tokens[-2])
                test_prefix = tokens[-1]
                if test_prefix.endswith(".rs"):
                    test_prefix = test_prefix[:-3]
                test_prefix += "::"
                test_patterns.append(test_prefix)
            # add test as-is otherwise
            else:
                test_patterns.append(test)

        if not packages:
            packages = set(
                os.listdir(path.join(self.context.topdir, "tests", "unit")))

        packages.remove('stylo')

        args = ["cargo", "test"]
        for crate in packages:
            args += ["-p", "%s_tests" % crate]
        args += test_patterns

        features = self.servo_features()
        if features:
            args += ["--features", "%s" % ' '.join(features)]

        env = self.build_env()
        env["RUST_BACKTRACE"] = "1"

        if sys.platform in ("win32", "msys"):
            if "msvc" in host_triple():
                # on MSVC, we need some DLLs in the path. They were copied
                # in to the servo.exe build dir, so just point PATH to that.
                env["PATH"] = "%s%s%s" % (path.dirname(
                    self.get_binary_path(False,
                                         False)), os.pathsep, env["PATH"])
            else:
                env["RUSTFLAGS"] = "-C link-args=-Wl,--subsystem,windows"

        result = call(args, env=env, cwd=self.servo_crate())
        if result != 0:
            return result

    @Command('test-stylo',
             description='Run stylo unit tests',
             category='testing')
    def test_stylo(self):
        self.set_use_stable_rust()
        self.ensure_bootstrapped()

        env = self.build_env()
        env["RUST_BACKTRACE"] = "1"
        env["CARGO_TARGET_DIR"] = path.join(self.context.topdir, "target",
                                            "geckolib").encode("UTF-8")

        with cd(path.join("ports", "geckolib")):
            result = call(["cargo", "test", "-p", "stylo_tests"], env=env)

        if result != 0:
            return result

    @Command('test-compiletest',
             description='Run compiletests',
             category='testing')
    @CommandArgument('--package',
                     '-p',
                     default=None,
                     help="Specific package to test")
    @CommandArgument('test_name',
                     nargs=argparse.REMAINDER,
                     help="Only run tests that match this pattern or file path"
                     )
    @CommandArgument('--release',
                     default=False,
                     action="store_true",
                     help="Run with a release build of servo")
    def test_compiletest(self, test_name=None, package=None, release=False):
        if test_name is None:
            test_name = []

        self.ensure_bootstrapped()

        if package:
            packages = {package}
        else:
            packages = set()

        test_patterns = []
        for test in test_name:
            # add package if 'tests/compiletest/<package>'
            match = re.search("tests/compiletest/(\\w+)/?$", test)
            if match:
                packages.add(match.group(1))
            # add package & test if '<package>/<test>', 'tests/compiletest/<package>/<test>.rs', or similar
            elif re.search("\\w/\\w", test):
                tokens = test.split("/")
                packages.add(tokens[-2])
                test_prefix = tokens[-1]
                if test_prefix.endswith(".rs"):
                    test_prefix = test_prefix[:-3]
                test_prefix += "::"
                test_patterns.append(test_prefix)
            # add test as-is otherwise
            else:
                test_patterns.append(test)

        if not packages:
            packages = set(
                os.listdir(
                    path.join(self.context.topdir, "tests", "compiletest")))

        packages.remove("helper")

        args = ["cargo", "test"]
        for crate in packages:
            args += ["-p", "%s_compiletest" % crate]
        args += test_patterns

        env = self.build_env()
        if release:
            env["BUILD_MODE"] = "release"
            args += ["--release"]
        else:
            env["BUILD_MODE"] = "debug"

        result = call(args, env=env, cwd=self.servo_crate())
        if result != 0:
            return result

    @Command('test-content',
             description='Run the content tests',
             category='testing')
    def test_content(self):
        print("Content tests have been replaced by web-platform-tests under "
              "tests/wpt/mozilla/.")
        return 0

    @Command('test-tidy',
             description='Run the source code tidiness check',
             category='testing')
    @CommandArgument('--all',
                     default=False,
                     action="store_true",
                     dest="all_files",
                     help="Check all files, and run the WPT lint in tidy, "
                     "even if unchanged")
    @CommandArgument('--no-progress',
                     default=False,
                     action="store_true",
                     help="Don't show progress for tidy")
    @CommandArgument('--self-test',
                     default=False,
                     action="store_true",
                     help="Run unit tests for tidy")
    def test_tidy(self, all_files, no_progress, self_test):
        if self_test:
            return test_tidy.do_tests()
        else:
            return tidy.scan(not all_files, not no_progress)

    @Command('test-webidl',
             description='Run the WebIDL parser tests',
             category='testing')
    @CommandArgument('--quiet',
                     '-q',
                     default=False,
                     action="store_true",
                     help="Don't print passing tests.")
    @CommandArgument(
        'tests',
        default=None,
        nargs="...",
        help="Specific tests to run, relative to the tests directory")
    def test_webidl(self, quiet, tests):
        self.ensure_bootstrapped()

        test_file_dir = path.abspath(
            path.join(PROJECT_TOPLEVEL_PATH, "components", "script", "dom",
                      "bindings", "codegen", "parser"))
        # For the `import WebIDL` in runtests.py
        sys.path.insert(0, test_file_dir)

        run_file = path.abspath(path.join(test_file_dir, "runtests.py"))
        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)

        verbose = not quiet
        return run_globals["run_tests"](tests, verbose)

    @Command(
        'test-wpt-failure',
        description=
        'Run the tests harness that verifies that the test failures are reported correctly',
        category='testing')
    def test_wpt_failure(self):
        self.ensure_bootstrapped()
        return not call([
            "bash",
            path.join("tests", "wpt", "run.sh"), "--no-pause-after-test",
            "--include", "infrastructure/failing-test.html"
        ],
                        env=self.build_env())

    @Command('test-wpt',
             description='Run the regular web platform test suite',
             category='testing',
             parser=create_parser_wpt)
    def test_wpt(self, **kwargs):
        self.ensure_bootstrapped()
        return self.run_test_list_or_dispatch(kwargs["test_list"], "wpt",
                                              self._test_wpt, **kwargs)

    def _test_wpt(self, **kwargs):
        hosts_file_path = path.join(self.context.topdir, 'tests', 'wpt',
                                    'hosts')
        os.environ["hosts_file_path"] = hosts_file_path
        run_file = path.abspath(
            path.join(self.context.topdir, "tests", "wpt", "run_wpt.py"))
        return self.wptrunner(run_file, **kwargs)

    # Helper to ensure all specified paths are handled, otherwise dispatch to appropriate test suite.
    def run_test_list_or_dispatch(self, requested_paths, correct_suite,
                                  correct_function, **kwargs):
        if not requested_paths:
            return correct_function(**kwargs)
        else:
            # Paths specified on command line. Ensure they can be handled, re-dispatch otherwise.
            all_handled = True
            for test_path in requested_paths:
                suite = self.suite_for_path(test_path)
                if suite is not None and correct_suite != suite:
                    all_handled = False
                    print(
                        "Warning: %s is not a %s test. Delegating to test-%s."
                        % (test_path, correct_suite, suite))
            if all_handled:
                return correct_function(**kwargs)
            else:
                # Dispatch each test to the correct suite via test()
                Registrar.dispatch("test",
                                   context=self.context,
                                   params=requested_paths)

    # Helper for test_css and test_wpt:
    def wptrunner(self, run_file, **kwargs):
        os.environ["RUST_BACKTRACE"] = "1"
        kwargs["debug"] = not kwargs["release"]
        if kwargs.pop("chaos"):
            kwargs["debugger"] = "rr"
            kwargs["debugger_args"] = "record --chaos"
            kwargs["repeat_until_unexpected"] = True
            # TODO: Delete rr traces from green test runs?
        prefs = kwargs.pop("prefs")
        if prefs:
            binary_args = []
            for pref in prefs:
                binary_args.append("--pref=" + pref)
            kwargs["binary_args"] = binary_args

        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)
        return run_globals["run_tests"](**kwargs)

    @Command(
        'update-manifest',
        description=
        'Run test-wpt --manifest-update SKIP_TESTS to regenerate MANIFEST.json',
        category='testing',
        parser=create_parser_wpt)
    def update_manifest(self, **kwargs):
        kwargs['test_list'].append(str('SKIP_TESTS'))
        kwargs['manifest_update'] = True
        return self.test_wpt(**kwargs)

    @Command('update-wpt',
             description='Update the web platform tests',
             category='testing',
             parser=updatecommandline.create_parser())
    @CommandArgument(
        '--patch',
        action='store_true',
        default=False,
        help='Create an mq patch or git commit containing the changes')
    def update_wpt(self, patch, **kwargs):
        self.ensure_bootstrapped()
        run_file = path.abspath(path.join("tests", "wpt", "update.py"))
        kwargs["no_patch"] = not patch

        if kwargs["no_patch"] and kwargs["sync"]:
            print("Are you sure you don't want a patch?")
            return 1

        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)
        return run_globals["update_tests"](**kwargs)

    @Command('test-jquery',
             description='Run the jQuery test suite',
             category='testing')
    @CommandArgument('--release',
                     '-r',
                     action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev',
                     '-d',
                     action='store_true',
                     help='Run the dev build')
    def test_jquery(self, release, dev):
        return self.jquery_test_runner("test", release, dev)

    @Command('test-dromaeo',
             description='Run the Dromaeo test suite',
             category='testing')
    @CommandArgument('tests',
                     default=["recommended"],
                     nargs="...",
                     help="Specific tests to run")
    @CommandArgument('--release',
                     '-r',
                     action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev',
                     '-d',
                     action='store_true',
                     help='Run the dev build')
    def test_dromaeo(self, tests, release, dev):
        return self.dromaeo_test_runner(tests, release, dev)

    @Command('update-jquery',
             description='Update the jQuery test suite expected results',
             category='testing')
    @CommandArgument('--release',
                     '-r',
                     action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev',
                     '-d',
                     action='store_true',
                     help='Run the dev build')
    def update_jquery(self, release, dev):
        return self.jquery_test_runner("update", release, dev)

    @Command('test-css',
             description='Run the web platform CSS tests',
             category='testing',
             parser=create_parser_wpt)
    def test_css(self, **kwargs):
        self.ensure_bootstrapped()
        return self.run_test_list_or_dispatch(kwargs["test_list"], "css",
                                              self._test_css, **kwargs)

    def _test_css(self, **kwargs):
        run_file = path.abspath(path.join("tests", "wpt", "run_css.py"))
        return self.wptrunner(run_file, **kwargs)

    @Command('update-css',
             description='Update the web platform CSS tests',
             category='testing',
             parser=updatecommandline.create_parser())
    @CommandArgument(
        '--patch',
        action='store_true',
        default=False,
        help='Create an mq patch or git commit containing the changes')
    def update_css(self, patch, **kwargs):
        self.ensure_bootstrapped()
        run_file = path.abspath(path.join("tests", "wpt", "update_css.py"))
        kwargs["no_patch"] = not patch

        if kwargs["no_patch"] and kwargs["sync"]:
            print("Are you sure you don't want a patch?")
            return 1

        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)
        return run_globals["update_tests"](**kwargs)

    @Command('compare_dromaeo',
             description=
             'Compare outputs of two runs of ./mach test-dromaeo command',
             category='testing')
    @CommandArgument(
        'params',
        default=None,
        nargs="...",
        help=" filepaths of output files of two runs of dromaeo test ")
    def compare_dromaeo(self, params):
        prev_op_filename = params[0]
        cur_op_filename = params[1]
        result = {
            'Test': [],
            'Prev_Time': [],
            'Cur_Time': [],
            'Difference(%)': []
        }
        with open(prev_op_filename,
                  'r') as prev_op, open(cur_op_filename, 'r') as cur_op:
            l1 = prev_op.readline()
            l2 = cur_op.readline()

            while ((l1.find('[dromaeo] Saving...')
                    and l2.find('[dromaeo] Saving...'))):
                l1 = prev_op.readline()
                l2 = cur_op.readline()

            reach = 3
            while (reach > 0):
                l1 = prev_op.readline()
                l2 = cur_op.readline()
                reach -= 1

            while True:
                l1 = prev_op.readline()
                l2 = cur_op.readline()
                if not l1:
                    break
                result['Test'].append(str(l1).split('|')[0].strip())
                result['Prev_Time'].append(float(
                    str(l1).split('|')[1].strip()))
                result['Cur_Time'].append(float(str(l2).split('|')[1].strip()))
                a = float(str(l1).split('|')[1].strip())
                b = float(str(l2).split('|')[1].strip())
                result['Difference(%)'].append(((b - a) / a) * 100)

            width_col1 = max([len(x) for x in result['Test']])
            width_col2 = max([len(str(x)) for x in result['Prev_Time']])
            width_col3 = max([len(str(x)) for x in result['Cur_Time']])
            width_col4 = max([len(str(x)) for x in result['Difference(%)']])

            for p, q, r, s in zip(['Test'], ['First Run'], ['Second Run'],
                                  ['Difference(%)']):
                print(
                    "\033[1m" + "{}|{}|{}|{}".format(
                        p.ljust(width_col1), q.ljust(width_col2),
                        r.ljust(width_col3), s.ljust(width_col4)) + "\033[0m" +
                    "\n" +
                    "--------------------------------------------------" +
                    "-------------------------------------------------------------------------"
                )

            for a1, b1, c1, d1 in zip(result['Test'], result['Prev_Time'],
                                      result['Cur_Time'],
                                      result['Difference(%)']):
                if d1 > 0:
                    print("\033[91m" +
                          "{}|{}|{}|{}".format(a1.ljust(width_col1),
                                               str(b1).ljust(width_col2),
                                               str(c1).ljust(width_col3),
                                               str(d1).ljust(width_col4)) +
                          "\033[0m")
                elif d1 < 0:
                    print("\033[92m" +
                          "{}|{}|{}|{}".format(a1.ljust(width_col1),
                                               str(b1).ljust(width_col2),
                                               str(c1).ljust(width_col3),
                                               str(d1).ljust(width_col4)) +
                          "\033[0m")
                else:
                    print("{}|{}|{}|{}".format(a1.ljust(width_col1),
                                               str(b1).ljust(width_col2),
                                               str(c1).ljust(width_col3),
                                               str(d1).ljust(width_col4)))

    def jquery_test_runner(self, cmd, release, dev):
        self.ensure_bootstrapped()
        base_dir = path.abspath(path.join("tests", "jquery"))
        jquery_dir = path.join(base_dir, "jquery")
        run_file = path.join(base_dir, "run_jquery.py")

        # Clone the jQuery repository if it doesn't exist
        if not os.path.isdir(jquery_dir):
            check_call([
                "git", "clone", "-b", "servo", "--depth", "1",
                "https://github.com/servo/jquery", jquery_dir
            ])

        # Run pull in case the jQuery repo was updated since last test run
        check_call(["git", "-C", jquery_dir, "pull"])

        # Check that a release servo build exists
        bin_path = path.abspath(self.get_binary_path(release, dev))

        return call([run_file, cmd, bin_path, base_dir])

    def dromaeo_test_runner(self, tests, release, dev):
        self.ensure_bootstrapped()
        base_dir = path.abspath(path.join("tests", "dromaeo"))
        dromaeo_dir = path.join(base_dir, "dromaeo")
        run_file = path.join(base_dir, "run_dromaeo.py")

        # Clone the Dromaeo repository if it doesn't exist
        if not os.path.isdir(dromaeo_dir):
            check_call([
                "git", "clone", "-b", "servo", "--depth", "1",
                "https://github.com/notriddle/dromaeo", dromaeo_dir
            ])

        # Run pull in case the Dromaeo repo was updated since last test run
        check_call(["git", "-C", dromaeo_dir, "pull"])

        # Compile test suite
        check_call(["make", "-C", dromaeo_dir, "web"])

        # Check that a release servo build exists
        bin_path = path.abspath(self.get_binary_path(release, dev))

        return check_call([run_file, "|".join(tests), bin_path, base_dir])
Example #3
0
def create_parser_update():
    """Build and return the argument parser for the wpt update command."""
    from update import updatecommandline as update_cmdline
    parser = update_cmdline.create_parser()
    return parser
Example #4
0
class MachCommands(CommandBase):
    DEFAULT_RENDER_MODE = "cpu"
    HELP_RENDER_MODE = "Value can be 'cpu', 'gpu' or 'both' (default " + DEFAULT_RENDER_MODE + ")"

    def __init__(self, context):
        # Initialize the shared mach command state on the base class.
        CommandBase.__init__(self, context)
        # Remember across commands in one mach session whether the test
        # crates were already built, so repeated runs skip the rebuild.
        if not hasattr(self.context, "built_tests"):
            self.context.built_tests = False

    @Command('test',
             description='Run specified Servo tests',
             category='testing')
    @CommandArgument('params',
                     default=None,
                     nargs="...",
                     help="Optionally select test based on "
                     "test file directory")
    @CommandArgument('--render-mode',
                     '-rm',
                     default=DEFAULT_RENDER_MODE,
                     help="The render mode to be used on all tests. " +
                     HELP_RENDER_MODE)
    @CommandArgument('--release',
                     default=False,
                     action="store_true",
                     help="Run with a release build of servo")
    @CommandArgument('--tidy-all',
                     default=False,
                     action="store_true",
                     help="Check all files, and run the WPT lint in tidy, "
                     "even if unchanged")
    @CommandArgument('--no-progress',
                     default=False,
                     action="store_true",
                     help="Don't show progress for tidy")
    @CommandArgument('--self-test',
                     default=False,
                     action="store_true",
                     help="Run unit tests for tidy")
    @CommandArgument('--all',
                     default=False,
                     action="store_true",
                     dest="all_suites",
                     help="Run all test suites")
    def test(self,
             params,
             render_mode=DEFAULT_RENDER_MODE,
             release=False,
             tidy_all=False,
             no_progress=False,
             self_test=False,
             all_suites=False):
        """Entry point for `./mach test`.

        Each positional argument is either a suite name or a test path; paths
        are routed to the suite that owns them (see suite_for_path).  Returns
        1 on invalid arguments or when no suites were selected; otherwise
        dispatches each selected `test-<suite>` command in order.
        """
        # Per-suite keyword arguments forwarded to the dispatched commands.
        suites = copy.deepcopy(TEST_SUITES)
        suites["tidy"]["kwargs"] = {
            "all_files": tidy_all,
            "no_progress": no_progress,
            "self_test": self_test,
            "stylo": False
        }
        suites["wpt"]["kwargs"] = {"release": release}
        suites["unit"]["kwargs"] = {}

        # Maps suite name -> list of explicit test paths for that suite.
        selected_suites = OrderedDict()

        if params is None:
            if all_suites:
                params = suites.keys()
            else:
                print(
                    "Specify a test path or suite name, or pass --all to run all test suites.\n\nAvailable suites:"
                )
                for s in suites:
                    print("    %s" % s)
                return 1

        for arg in params:
            found = False
            if arg in suites and arg not in selected_suites:
                selected_suites[arg] = []
                found = True
            else:
                suite = self.suite_for_path(arg)
                if suite is not None:
                    if suite not in selected_suites:
                        selected_suites[suite] = []
                    selected_suites[suite].append(arg)
                    found = True
                    # Bug fix: a stray `break` here used to abort argument
                    # processing after the first path match, silently
                    # dropping any remaining suite names or test paths.

            if not found:
                print("%s is not a valid test path or suite name" % arg)
                return 1

        test_start = time()
        for suite, tests in selected_suites.iteritems():
            props = suites[suite]
            kwargs = props.get("kwargs", {})
            if tests:
                kwargs[props["include_arg"]] = tests

            Registrar.dispatch("test-%s" % suite,
                               context=self.context,
                               **kwargs)

        elapsed = time() - test_start

        print("Tests completed in %0.2fs" % elapsed)

    # Helper to determine which test suite owns the path
    def suite_for_path(self, path_arg):
        """Return the suite name owning `path_arg`, or None if the path does
        not exist or no suite prefix matches."""
        candidate = path.abspath(path_arg)
        if not os.path.exists(candidate):
            return None
        for prefix, suite in TEST_SUITES_BY_PREFIX.iteritems():
            if candidate.startswith(prefix):
                return suite
        return None

    @Command('test-perf',
             description='Run the page load performance test',
             category='testing')
    @CommandArgument('--base', default=None, help="the base URL for testcases")
    @CommandArgument('--date', default=None, help="the datestamp for the data")
    @CommandArgument('--submit',
                     '-a',
                     default=False,
                     action="store_true",
                     help="submit the data to perfherder")
    def test_perf(self, base=None, date=None, submit=False):
        """Run the page-load performance harness under software rendering,
        forwarding the optional base URL, datestamp, and submit flag."""
        self.set_software_rendering_env(True)

        self.ensure_bootstrapped()
        env = self.build_env()
        # Assemble the shell invocation from whichever options were given.
        cmd = ["bash", "test_perf.sh"]
        for flag, value in (("--base", base), ("--date", date)):
            if value:
                cmd += [flag, value]
        if submit:
            cmd.append("--submit")
        return call(cmd, env=env, cwd=path.join("etc", "ci", "performance"))

    @Command('test-unit', description='Run unit tests', category='testing')
    @CommandArgument('test_name',
                     nargs=argparse.REMAINDER,
                     help="Only run tests that match this pattern or file path"
                     )
    @CommandArgument('--package',
                     '-p',
                     default=None,
                     help="Specific package to test")
    @CommandArgument('--bench',
                     default=False,
                     action="store_true",
                     help="Run in bench mode")
    @CommandArgument('--nocapture',
                     default=False,
                     action="store_true",
                     help="Run tests with nocapture ( show test stdout )")
    def test_unit(self,
                  test_name=None,
                  package=None,
                  bench=False,
                  nocapture=False):
        if test_name is None:
            test_name = []

        self.ensure_bootstrapped()

        if package:
            packages = {package}
        else:
            packages = set()

        test_patterns = []
        for test in test_name:
            # add package if 'tests/unit/<package>'
            match = re.search("tests/unit/(\\w+)/?$", test)
            if match:
                packages.add(match.group(1))
            # add package & test if '<package>/<test>', 'tests/unit/<package>/<test>.rs', or similar
            elif re.search("\\w/\\w", test):
                tokens = test.split("/")
                packages.add(tokens[-2])
                test_prefix = tokens[-1]
                if test_prefix.endswith(".rs"):
                    test_prefix = test_prefix[:-3]
                test_prefix += "::"
                test_patterns.append(test_prefix)
            # add test as-is otherwise
            else:
                test_patterns.append(test)

        in_crate_packages = []

        # Since the selectors tests have no corresponding selectors_tests crate in tests/unit,
        # we need to treat them separately from those that do.
        try:
            packages.remove('selectors')
            in_crate_packages += ["selectors"]
        except KeyError:
            pass

        if not packages:
            packages = set(
                os.listdir(path.join(self.context.topdir, "tests",
                                     "unit"))) - set(['.DS_Store'])
            in_crate_packages += ["selectors"]

        # Since the selectors tests have no corresponding selectors_tests crate in tests/unit,
        # we need to treat them separately from those that do.
        try:
            packages.remove('selectors')
            in_crate_packages += ["selectors"]
        except KeyError:
            pass

        packages.discard('stylo')

        env = self.build_env(test_unit=True)
        env["RUST_BACKTRACE"] = "1"

        if "msvc" in host_triple():
            # on MSVC, we need some DLLs in the path. They were copied
            # in to the servo.exe build dir, so just point PATH to that.
            env["PATH"] = "%s%s%s" % (path.dirname(
                self.get_binary_path(False, False)), os.pathsep, env["PATH"])

        features = self.servo_features()
        if len(packages) > 0:
            args = [
                "cargo", "bench" if bench else "test", "--manifest-path",
                self.servo_manifest()
            ]
            for crate in packages:
                args += ["-p", "%s_tests" % crate]
            for crate in in_crate_packages:
                args += ["-p", crate]
            args += test_patterns

            if features:
                args += ["--features", "%s" % ' '.join(features)]

            if nocapture:
                args += ["--", "--nocapture"]

            err = self.call_rustup_run(args, env=env)
            if err is not 0:
                return err

    @Command('test-stylo',
             description='Run stylo unit tests',
             category='testing')
    @CommandArgument('test_name',
                     nargs=argparse.REMAINDER,
                     help="Only run tests that match this pattern or file path"
                     )
    @CommandArgument('--release',
                     default=False,
                     action="store_true",
                     help="Run with a release build of servo")
    def test_stylo(self, release=False, test_name=None):
        """Run the stylo unit tests with the geckolib toolchain."""
        self.set_use_geckolib_toolchain()
        self.ensure_bootstrapped()

        env = self.build_env()
        env["RUST_BACKTRACE"] = "1"
        # Keep stylo artifacts out of the main target directory.
        target_dir = path.join(self.context.topdir, "target", "geckolib")
        env["CARGO_TARGET_DIR"] = target_dir.encode("UTF-8")

        args = ["cargo", "test", "--manifest-path",
                self.geckolib_manifest(), "-p", "stylo_tests"]
        if release:
            args.append("--release")
        args.extend(test_name or [])
        return self.call_rustup_run(args, env=env)

    @Command('test-content',
             description='Run the content tests',
             category='testing')
    def test_content(self):
        """Stub kept for compatibility; content tests moved to WPT."""
        message = ("Content tests have been replaced by web-platform-tests under "
                   "tests/wpt/mozilla/.")
        print(message)
        return 0

    @Command('test-tidy',
             description='Run the source code tidiness check',
             category='testing')
    @CommandArgument('--all',
                     default=False,
                     action="store_true",
                     dest="all_files",
                     help="Check all files, and run the WPT lint in tidy, "
                     "even if unchanged")
    @CommandArgument('--no-progress',
                     default=False,
                     action="store_true",
                     help="Don't show progress for tidy")
    @CommandArgument('--self-test',
                     default=False,
                     action="store_true",
                     help="Run unit tests for tidy")
    @CommandArgument('--stylo',
                     default=False,
                     action="store_true",
                     help="Only handle files in the stylo tree")
    def test_tidy(self, all_files, no_progress, self_test, stylo):
        """Run tidy's own unit tests, or scan the tree for tidiness issues."""
        if self_test:
            return test_tidy.do_tests()
        # Note the inverted flags: tidy.scan takes faster/progress booleans.
        return tidy.scan(not all_files, not no_progress, stylo=stylo)

    @Command('test-webidl',
             description='Run the WebIDL parser tests',
             category='testing')
    @CommandArgument('--quiet',
                     '-q',
                     default=False,
                     action="store_true",
                     help="Don't print passing tests.")
    @CommandArgument(
        'tests',
        default=None,
        nargs="...",
        help="Specific tests to run, relative to the tests directory")
    def test_webidl(self, quiet, tests):
        """Execute the WebIDL parser's own test runner in-process."""
        self.ensure_bootstrapped()

        test_file_dir = path.abspath(
            path.join(PROJECT_TOPLEVEL_PATH, "components", "script", "dom",
                      "bindings", "codegen", "parser"))
        # runtests.py does `import WebIDL`; make that module resolvable.
        sys.path.insert(0, test_file_dir)

        run_file = path.abspath(path.join(test_file_dir, "runtests.py"))
        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)

        return run_globals["run_tests"](tests, not quiet)

    @Command(
        'test-wpt-failure',
        description=
        'Run the tests harness that verifies that the test failures are reported correctly',
        category='testing',
        parser=create_parser_wpt)
    def test_wpt_failure(self, **kwargs):
        """Self-check: run a known-failing WPT test and succeed only if the
        harness reports the failure."""
        self.ensure_bootstrapped()
        # Force non-interactive mode and restrict the run to the canary test.
        kwargs["pause_after_test"] = False
        kwargs["include"] = ["infrastructure/failing-test.html"]
        passed = self._test_wpt(**kwargs)
        return not passed

    @Command('test-wpt',
             description='Run the regular web platform test suite',
             category='testing',
             parser=create_parser_wpt)
    def test_wpt(self, **kwargs):
        """Run the WPT suite, delegating mismatched paths to their suites."""
        self.ensure_bootstrapped()
        ret = self.run_test_list_or_dispatch(kwargs["test_list"], "wpt",
                                             self._test_wpt, **kwargs)
        # --always-succeed masks the real result for soft-fail CI jobs.
        return 0 if kwargs["always_succeed"] else ret

    def _test_wpt(self, **kwargs):
        """Point the harness at the WPT hosts file and run it."""
        hosts = path.join(self.context.topdir, 'tests', 'wpt', 'hosts')
        os.environ["hosts_file_path"] = hosts
        runner = path.abspath(
            path.join(self.context.topdir, "tests", "wpt", "run_wpt.py"))
        return self.wptrunner(runner, **kwargs)

    # Helper to ensure all specified paths are handled, otherwise dispatch to appropriate test suite.
    def run_test_list_or_dispatch(self, requested_paths, correct_suite,
                                  correct_function, **kwargs):
        """Run `correct_function` when every requested path belongs to
        `correct_suite`; otherwise re-dispatch through `test` so each path
        reaches its owning suite."""
        if not requested_paths:
            return correct_function(**kwargs)
        # Paths were given explicitly: check whether this suite owns them all.
        unhandled = False
        for test_path in requested_paths:
            owner = self.suite_for_path(test_path)
            if owner is not None and owner != correct_suite:
                unhandled = True
                print("Warning: %s is not a %s test. Delegating to test-%s." %
                      (test_path, correct_suite, owner))
        if not unhandled:
            return correct_function(**kwargs)
        # Dispatch each test to the correct suite via test()
        Registrar.dispatch("test",
                           context=self.context,
                           params=requested_paths)

    # Helper for test_css and test_wpt:
    def wptrunner(self, run_file, **kwargs):
        """Configure the environment and invoke a wptrunner-based harness
        script in-process."""
        self.set_software_rendering_env(kwargs['release'])

        # Rayon sizes its pool from the CPU count, which oversubscribes CI
        # machines that already run many Servo processes in parallel and
        # causes spurious timeouts; pin it to a 2-CPU view instead.
        os.environ['RAYON_RS_NUM_CPUS'] = "2"
        os.environ["RUST_BACKTRACE"] = "1"

        kwargs["debug"] = not kwargs["release"]

        if kwargs.pop("rr_chaos"):
            kwargs["debugger"] = "rr"
            kwargs["debugger_args"] = "record --chaos"
            kwargs["repeat_until_unexpected"] = True
            # TODO: Delete rr traces from green test runs?

        prefs = kwargs.pop("prefs")
        if prefs:
            kwargs["binary_args"] = ["--pref=" + pref for pref in prefs]

        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)
        return run_globals["run_tests"](**kwargs)

    @Command(
        'update-manifest',
        description=
        'Run test-wpt --manifest-update SKIP_TESTS to regenerate MANIFEST.json',
        category='testing',
        parser=create_parser_wpt)
    def update_manifest(self, **kwargs):
        """Regenerate MANIFEST.json without actually running any tests."""
        kwargs['manifest_update'] = True
        # SKIP_TESTS matches nothing, so only the manifest update runs.
        kwargs['test_list'].append(str('SKIP_TESTS'))
        return self.test_wpt(**kwargs)

    @Command('update-wpt',
             description='Update the web platform tests',
             category='testing',
             parser=updatecommandline.create_parser())
    def update_wpt(self, **kwargs):
        """Sync the WPT checkout by running tests/wpt/update.py in-process."""
        self.ensure_bootstrapped()
        patch = kwargs.get("patch", False)

        # Syncing without producing a patch is almost always a mistake.
        if not patch and kwargs["sync"]:
            print("Are you sure you don't want a patch?")
            return 1

        run_file = path.abspath(path.join("tests", "wpt", "update.py"))
        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)
        return run_globals["update_tests"](**kwargs)

    @Command(
        'filter-intermittents',
        description=
        'Given a WPT error summary file, filter out intermittents and other cruft.',
        category='testing')
    @CommandArgument('summary', help="Error summary log to take un")
    @CommandArgument('--log-filteredsummary',
                     default=None,
                     help='Print filtered log to file')
    @CommandArgument('--log-intermittents',
                     default=None,
                     help='Print intermittents to file')
    @CommandArgument(
        '--auth',
        default=None,
        help=
        'File containing basic authorization credentials for Github API (format `username:password`)'
    )
    @CommandArgument(
        '--tracker-api',
        default=None,
        action='store',
        help='The API endpoint for tracking known intermittent failures.')
    @CommandArgument(
        '--reporter-api',
        default=None,
        action='store',
        help='The API endpoint for reporting tracked intermittent failures.')
    def filter_intermittents(self, summary, log_filteredsummary,
                             log_intermittents, auth, tracker_api,
                             reporter_api):
        """Split a WPT error-summary file into genuine failures and known
        intermittents.

        Each failure is looked up either against the intermittent-tracker
        service (when --tracker-api is given) or against open GitHub issues
        labelled I-intermittent.  Genuine failures are written as JSON lines
        to --log-filteredsummary (or stdout).  Returns 1 if any genuine
        failures remain, 0 otherwise.
        """
        # Optional HTTP basic-auth value for the GitHub search API.
        encoded_auth = None
        if auth:
            with open(auth, "r") as file:
                encoded_auth = base64.encodestring(
                    file.read().strip()).replace('\n', '')
        # The summary is JSON-lines; only records carrying a 'status' field
        # describe test results.
        failures = []
        with open(summary, "r") as file:
            for line in file:
                line_json = json.loads(line)
                if 'status' in line_json:
                    failures += [line_json]
        actual_failures = []
        intermittents = []
        for failure in failures:
            if tracker_api:
                # Normalize the endpoint.  NOTE(review): this rebinds
                # tracker_api inside the loop; harmless after the first
                # iteration, but subtle.
                if tracker_api == 'default':
                    tracker_api = "http://build.servo.org/intermittent-tracker"
                elif tracker_api.endswith('/'):
                    tracker_api = tracker_api[0:-1]

                query = urllib2.quote(failure['test'], safe='')
                request = urllib2.Request("%s/query.py?name=%s" %
                                          (tracker_api, query))
                search = urllib2.urlopen(request)
                data = json.load(search)
                # Any hit from the tracker marks the failure as intermittent.
                if len(data) == 0:
                    actual_failures += [failure]
                else:
                    intermittents += [failure]
            else:
                qstr = "repo:servo/servo+label:I-intermittent+type:issue+state:open+%s" % failure[
                    'test']
                # we want `/` to get quoted, but not `+` (github's API doesn't like that), so we set `safe` to `+`
                query = urllib2.quote(qstr, safe='+')
                request = urllib2.Request(
                    "https://api.github.com/search/issues?q=%s" % query)
                if encoded_auth:
                    request.add_header("Authorization",
                                       "Basic %s" % encoded_auth)
                search = urllib2.urlopen(request)
                data = json.load(search)
                # Any matching open issue marks the failure as intermittent.
                if data['total_count'] == 0:
                    actual_failures += [failure]
                else:
                    intermittents += [failure]

        if reporter_api:
            # Report each distinct intermittent test once, tagged with the PR
            # number taken from the most recent merge commit.
            if reporter_api == 'default':
                reporter_api = "http://build.servo.org/intermittent-failure-tracker"
            if reporter_api.endswith('/'):
                reporter_api = reporter_api[0:-1]
            reported = set()

            proc = subprocess.Popen(
                ["git", "log", "--merges", "--oneline", "-1"],
                stdout=subprocess.PIPE)
            (last_merge, _) = proc.communicate()

            # Extract the issue reference from "abcdef Auto merge of #NNN"
            # NOTE(review): assumes the bors merge-commit subject format;
            # raises if the subject deviates.
            pull_request = int(last_merge.split(' ')[4][1:])

            for intermittent in intermittents:
                if intermittent['test'] in reported:
                    continue
                reported.add(intermittent['test'])

                data = {
                    'test_file':
                    intermittent['test'],
                    'platform':
                    platform.system(),
                    'builder':
                    os.environ.get('BUILDER_NAME', 'BUILDER NAME MISSING'),
                    'number':
                    pull_request,
                }
                request = urllib2.Request("%s/record.py" % reporter_api,
                                          urllib.urlencode(data))
                request.add_header('Accept', 'application/json')
                response = urllib2.urlopen(request)
                data = json.load(response)
                if data['status'] != "success":
                    print('Error reporting test failure: ' + data['error'])

        if log_intermittents:
            # One JSON object per line, mirroring the input format.
            with open(log_intermittents, "w") as intermittents_file:
                for intermittent in intermittents:
                    json.dump(intermittent, intermittents_file)
                    print("\n", end='', file=intermittents_file)

        if len(actual_failures) == 0:
            return 0

        # Genuine failures: to the requested file, or stdout by default.
        output = open(log_filteredsummary,
                      "w") if log_filteredsummary else sys.stdout
        for failure in actual_failures:
            json.dump(failure, output)
            print("\n", end='', file=output)

        if output is not sys.stdout:
            output.close()
        return 1

    @Command('test-jquery',
             description='Run the jQuery test suite',
             category='testing')
    @CommandArgument('--release',
                     '-r',
                     action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev',
                     '-d',
                     action='store_true',
                     help='Run the dev build')
    def test_jquery(self, release, dev):
        # Thin wrapper: delegate to the shared jQuery harness in "test" mode.
        return self.jquery_test_runner("test", release, dev)

    @Command('test-dromaeo',
             description='Run the Dromaeo test suite',
             category='testing')
    @CommandArgument('tests',
                     default=["recommended"],
                     nargs="...",
                     help="Specific tests to run")
    @CommandArgument('--release',
                     '-r',
                     action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev',
                     '-d',
                     action='store_true',
                     help='Run the dev build')
    def test_dromaeo(self, tests, release, dev):
        # Thin wrapper: delegate to the shared Dromaeo harness.
        return self.dromaeo_test_runner(tests, release, dev)

    @Command('update-jquery',
             description='Update the jQuery test suite expected results',
             category='testing')
    @CommandArgument('--release',
                     '-r',
                     action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev',
                     '-d',
                     action='store_true',
                     help='Run the dev build')
    def update_jquery(self, release, dev):
        # Thin wrapper: delegate to the shared jQuery harness in "update"
        # mode, which refreshes the expected results.
        return self.jquery_test_runner("update", release, dev)

    @Command('compare_dromaeo',
             description=
             'Compare outputs of two runs of ./mach test-dromaeo command',
             category='testing')
    @CommandArgument(
        'params',
        default=None,
        nargs="...",
        help=" filepaths of output files of two runs of dromaeo test ")
    def compare_dromaeo(self, params):
        """Print a colorized table comparing two Dromaeo output logs.

        params[0] is the earlier run's log file, params[1] the later one.
        Rows where the second run is slower print in red, faster rows in
        green.  NOTE(review): assumes exactly two paths are supplied and
        that both logs list the same tests in the same order -- neither is
        validated.
        """
        prev_op_filename = params[0]
        cur_op_filename = params[1]
        result = {
            'Test': [],
            'Prev_Time': [],
            'Cur_Time': [],
            'Difference(%)': []
        }
        with open(prev_op_filename,
                  'r') as prev_op, open(cur_op_filename, 'r') as cur_op:
            l1 = prev_op.readline()
            l2 = cur_op.readline()

            # Skip ahead until the '[dromaeo] Saving...' marker in both logs.
            # NOTE(review): str.find() returns -1 (truthy) when absent and 0
            # when the marker starts the line, so this only stops on a line
            # *beginning* with the marker; at EOF readline() yields '' whose
            # find() is also -1, so a log missing the marker would loop
            # forever -- confirm against the log format.
            while ((l1.find('[dromaeo] Saving...')
                    and l2.find('[dromaeo] Saving...'))):
                l1 = prev_op.readline()
                l2 = cur_op.readline()

            # Skip the three header lines that follow the marker.
            reach = 3
            while (reach > 0):
                l1 = prev_op.readline()
                l2 = cur_op.readline()
                reach -= 1

            # Remaining lines are 'name | time' rows; collect name, both
            # timings, and the percentage difference.
            while True:
                l1 = prev_op.readline()
                l2 = cur_op.readline()
                if not l1:
                    break
                result['Test'].append(str(l1).split('|')[0].strip())
                result['Prev_Time'].append(float(
                    str(l1).split('|')[1].strip()))
                result['Cur_Time'].append(float(str(l2).split('|')[1].strip()))
                a = float(str(l1).split('|')[1].strip())
                b = float(str(l2).split('|')[1].strip())
                result['Difference(%)'].append(((b - a) / a) * 100)

            # Column widths sized to the longest value in each column.
            width_col1 = max([len(x) for x in result['Test']])
            width_col2 = max([len(str(x)) for x in result['Prev_Time']])
            width_col3 = max([len(str(x)) for x in result['Cur_Time']])
            width_col4 = max([len(str(x)) for x in result['Difference(%)']])

            # Bold header row followed by a separator line.
            for p, q, r, s in zip(['Test'], ['First Run'], ['Second Run'],
                                  ['Difference(%)']):
                print(
                    "\033[1m" + "{}|{}|{}|{}".format(
                        p.ljust(width_col1), q.ljust(width_col2),
                        r.ljust(width_col3), s.ljust(width_col4)) + "\033[0m" +
                    "\n" +
                    "--------------------------------------------------" +
                    "-------------------------------------------------------------------------"
                )

            # Data rows: red when slower (positive diff), green when faster.
            for a1, b1, c1, d1 in zip(result['Test'], result['Prev_Time'],
                                      result['Cur_Time'],
                                      result['Difference(%)']):
                if d1 > 0:
                    print("\033[91m" +
                          "{}|{}|{}|{}".format(a1.ljust(width_col1),
                                               str(b1).ljust(width_col2),
                                               str(c1).ljust(width_col3),
                                               str(d1).ljust(width_col4)) +
                          "\033[0m")
                elif d1 < 0:
                    print("\033[92m" +
                          "{}|{}|{}|{}".format(a1.ljust(width_col1),
                                               str(b1).ljust(width_col2),
                                               str(c1).ljust(width_col3),
                                               str(d1).ljust(width_col4)) +
                          "\033[0m")
                else:
                    print("{}|{}|{}|{}".format(a1.ljust(width_col1),
                                               str(b1).ljust(width_col2),
                                               str(c1).ljust(width_col3),
                                               str(d1).ljust(width_col4)))

    def jquery_test_runner(self, cmd, release, dev):
        """Fetch the servo jQuery fork and run `cmd` (e.g. "test" or
        "update") against the selected servo build."""
        self.ensure_bootstrapped()
        base_dir = path.abspath(path.join("tests", "jquery"))
        jquery_dir = path.join(base_dir, "jquery")
        run_file = path.join(base_dir, "run_jquery.py")

        # Shallow-clone the servo branch the first time through.
        if not os.path.isdir(jquery_dir):
            check_call(["git", "clone", "-b", "servo", "--depth", "1",
                        "https://github.com/servo/jquery", jquery_dir])

        # Otherwise pick up any upstream changes since the last run.
        check_call(["git", "-C", jquery_dir, "pull"])

        # Locate the servo binary for the requested profile.
        servo_binary = path.abspath(self.get_binary_path(release, dev))

        return call([run_file, cmd, servo_binary, base_dir])

    def dromaeo_test_runner(self, tests, release, dev):
        """Fetch and build Dromaeo, then run the named tests against the
        selected servo build."""
        self.ensure_bootstrapped()
        base_dir = path.abspath(path.join("tests", "dromaeo"))
        dromaeo_dir = path.join(base_dir, "dromaeo")
        run_file = path.join(base_dir, "run_dromaeo.py")

        # Shallow-clone the servo branch the first time through.
        if not os.path.isdir(dromaeo_dir):
            check_call(["git", "clone", "-b", "servo", "--depth", "1",
                        "https://github.com/notriddle/dromaeo", dromaeo_dir])

        # Otherwise pick up any upstream changes since the last run.
        check_call(["git", "-C", dromaeo_dir, "pull"])

        # Build the web-facing suite.
        check_call(["make", "-C", dromaeo_dir, "web"])

        # Locate the servo binary for the requested profile.
        servo_binary = path.abspath(self.get_binary_path(release, dev))

        return check_call([run_file, "|".join(tests), servo_binary, base_dir])

    def set_software_rendering_env(self, use_release):
        # On Linux and mac, find the OSMesa software rendering library and
        # add it to the dynamic linker search path.
        try:
            # use_release selects which build's binary directory to search;
            # the second argument is its complement (dev <-> release).
            bin_path = self.get_binary_path(use_release, not use_release)
            if not set_osmesa_env(bin_path, os.environ):
                print("Warning: Cannot set the path to OSMesa library.")
        except BuildNotFound:
            # This can occur when cross compiling (e.g. arm64), in which case
            # we won't run the tests anyway so can safely ignore this step.
            pass
Example #5
0
class MachCommands(CommandBase):
    # Default rendering backend used when no --render-mode/--kind is given.
    DEFAULT_RENDER_MODE = "cpu"
    # Shared help text for the render-mode arguments declared below.
    HELP_RENDER_MODE = "Value can be 'cpu', 'gpu' or 'both' (default " + DEFAULT_RENDER_MODE + ")"

    def __init__(self, context):
        """Initialize the command container and the shared flag that tracks
        whether test binaries were already built in this mach invocation."""
        CommandBase.__init__(self, context)
        if not hasattr(self.context, "built_tests"):
            # Stored on the mach context so several commands in a single
            # invocation only trigger the test build once.
            self.context.built_tests = False

    def ensure_built_tests(self, release=False):
        """Build the test binaries once per mach context via the
        'build-tests' command; exits the process on build failure."""
        if self.context.built_tests:
            return
        status = Registrar.dispatch('build-tests',
                                    context=self.context,
                                    release=release)
        if status:
            # Propagate the build failure as the process exit code.
            sys.exit(status)
        self.context.built_tests = True

    def find_test(self, prefix, release=False):
        """Locate the executable test binary named '<prefix>-<hash>' in the
        target directory of the selected build mode.

        Returns the path to the binary, or None when no matching executable
        exists.
        """
        build_mode = "release" if release else "debug"
        # Hoist the directory computation out of the loop: the original
        # called get_target_dir()/path.join once per candidate file.
        target_dir = path.join(self.get_target_dir(), build_mode)
        for filename in os.listdir(target_dir):
            if filename.startswith(prefix + "-"):
                filepath = path.join(target_dir, filename)
                # Skip directories and non-executable artifacts (.d files etc).
                if path.isfile(filepath) and os.access(filepath, os.X_OK):
                    return filepath
        return None

    def run_test(self, prefix, args=None, release=False):
        """Find the test binary for *prefix* and execute it with *args*.

        Returns the subprocess exit code, or None when no matching binary
        is found.
        """
        # Fix the mutable-default-argument pitfall: the original 'args=[]'
        # is shared across calls and could accumulate mutations.
        if args is None:
            args = []
        t = self.find_test(prefix, release=release)
        if t:
            return subprocess.call([t] + args, env=self.build_env())

    @Command('test', description='Run all Servo tests', category='testing')
    @CommandArgument('params',
                     default=None,
                     nargs="...",
                     help="Optionally select test based on "
                     "test file directory")
    @CommandArgument('--render-mode',
                     '-rm',
                     default=DEFAULT_RENDER_MODE,
                     help="The render mode to be used on all tests. " +
                     HELP_RENDER_MODE)
    @CommandArgument('--release',
                     default=False,
                     action="store_true",
                     help="Run with a release build of servo")
    def test(self, params, render_mode=DEFAULT_RENDER_MODE, release=False):
        """Dispatch one or more test suites selected by name or by path.

        Each entry in *params* is either a suite name (tidy/ref/wpt/css/unit)
        or a filesystem path; paths are attributed to the suite owning them
        and forwarded via that suite's include argument. With no params,
        every suite runs. Returns 1 on an unrecognized argument.
        """
        # Suite registry: per-suite dispatch kwargs, the paths each suite
        # owns, and the keyword name used to pass an include list.
        suites = OrderedDict([
            ("tidy", {}),
            ("ref", {
                "kwargs": {
                    "kind": render_mode
                },
                "paths": [path.abspath(path.join("tests", "ref"))],
                "include_arg": "include"
            }),
            ("wpt", {
                "kwargs": {
                    "release": release
                },
                "paths": [
                    path.abspath(
                        path.join("tests", "wpt", "web-platform-tests")),
                    path.abspath(path.join("tests", "wpt", "mozilla"))
                ],
                "include_arg":
                "include"
            }),
            ("css", {
                "kwargs": {
                    "release": release
                },
                "paths":
                [path.abspath(path.join("tests", "wpt", "css-tests"))],
                "include_arg": "include"
            }),
            ("unit", {
                "kwargs": {},
                "paths": [path.abspath(path.join("tests", "unit"))],
                "include_arg": "test_name"
            })
        ])

        # Reverse index: absolute path prefix -> suite name.
        # NOTE(review): the comprehension variable 'path' shadows the
        # os.path import inside this expression only.
        suites_by_prefix = {
            path: k
            for k, v in suites.iteritems() if "paths" in v
            for path in v["paths"]
        }

        selected_suites = OrderedDict()

        if params is None:
            # No explicit selection: run every suite.
            params = suites.keys()

        for arg in params:
            found = False
            if arg in suites and arg not in selected_suites:
                # Argument names a suite directly.
                selected_suites[arg] = []
                found = True

            elif os.path.exists(path.abspath(arg)):
                # Argument is a path: attribute it to the owning suite.
                abs_path = path.abspath(arg)
                for prefix, suite in suites_by_prefix.iteritems():
                    if abs_path.startswith(prefix):
                        if suite not in selected_suites:
                            selected_suites[suite] = []
                        selected_suites[suite].append(arg)
                        found = True
                        break

            if not found:
                print("%s is not a valid test path or suite name" % arg)
                return 1

        test_start = time()
        for suite, tests in selected_suites.iteritems():
            props = suites[suite]
            kwargs = props.get("kwargs", {})
            if tests:
                # Restrict the suite to the explicitly requested paths.
                kwargs[props["include_arg"]] = tests

            Registrar.dispatch("test-%s" % suite,
                               context=self.context,
                               **kwargs)

        elapsed = time() - test_start

        print("Tests completed in %0.2fs" % elapsed)

    @Command('test-unit', description='Run unit tests', category='testing')
    @CommandArgument('--package',
                     '-p',
                     default=None,
                     help="Specific package to test")
    @CommandArgument('test_name',
                     nargs=argparse.REMAINDER,
                     help="Only run tests that match this pattern or file path"
                     )
    def test_unit(self, test_name=None, package=None):
        """Run the Rust unit tests via `cargo test`.

        Each *test_name* entry may be a suite path (tests/unit/<package>),
        a package-qualified test (<package>/<test> or
        tests/unit/<package>/<test>.rs), or a bare test name pattern.
        Returns cargo's exit code on failure.
        """
        if test_name is None:
            test_name = []

        self.ensure_bootstrapped()

        if package:
            packages = {package}
        else:
            packages = set()

        test_patterns = []
        for test in test_name:
            # add package if 'tests/unit/<package>'
            match = re.search("tests/unit/(\\w+)/?$", test)
            if match:
                packages.add(match.group(1))
            # add package & test if '<package>/<test>', 'tests/unit/<package>/<test>.rs', or similar
            elif re.search("\\w/\\w", test):
                tokens = test.split("/")
                packages.add(tokens[-2])
                test_prefix = tokens[-1]
                if test_prefix.endswith(".rs"):
                    test_prefix = test_prefix[:-3]
                # '::' prefixes restrict cargo to tests in that module.
                test_prefix += "::"
                test_patterns.append(test_prefix)
            # add test as-is otherwise
            else:
                test_patterns.append(test)

        if not packages:
            # Default to every package under tests/unit.
            packages = set(
                os.listdir(path.join(self.context.topdir, "tests", "unit")))

        # Each unit-test crate is named '<package>_tests'.
        args = ["cargo", "test"]
        for crate in packages:
            args += ["-p", "%s_tests" % crate]
        args += test_patterns
        result = subprocess.call(args,
                                 env=self.build_env(),
                                 cwd=self.servo_crate())
        if result != 0:
            return result

    @Command('test-ref',
             description='Run the reference tests',
             category='testing')
    @CommandArgument('--kind',
                     '-k',
                     default=DEFAULT_RENDER_MODE,
                     help=HELP_RENDER_MODE)
    @CommandArgument('--release',
                     '-r',
                     action='store_true',
                     help='Run with a release build of Servo')
    @CommandArgument('--include',
                     default=None,
                     nargs='+',
                     help="Only run tests that match this pattern. If the "
                     "path to the ref test directory is included, it "
                     "will automatically be trimmed out.")
    @CommandArgument(
        'servo_params',
        default=None,
        nargs=argparse.REMAINDER,
        help="Command-line arguments to be passed through to Servo")
    def test_ref(self,
                 kind=DEFAULT_RENDER_MODE,
                 include=None,
                 servo_params=None,
                 release=False):
        """Run the reftest binary in cpu and/or gpu mode.

        Returns 1 if any run reports an error, otherwise None.
        """
        self.ensure_bootstrapped()
        self.ensure_built_tests(release=release)
        assert kind is not None, 'kind cannot be None, see help'

        # 'both' expands into one reftest run per render mode.
        kinds = ["cpu", "gpu"] if kind == 'both' else [kind]
        test_path = path.join(self.context.topdir, "tests", "ref")
        error = False

        test_start = time()
        for k in kinds:
            print("Running %s reftests..." % k)
            test_args = [k, test_path]
            if include is not None:
                ref_path = path.join("tests", "ref")
                for name in include:
                    # Check to see if we were passed something leading with the
                    # path to the ref test directory, and trim it so that reftest
                    # knows how to filter it.
                    maybe_path = path.normpath(name)
                    if ref_path in maybe_path:
                        test_args.append(path.relpath(maybe_path, ref_path))
                    else:
                        test_args.append(name)
            if servo_params is not None:
                # Everything after '--' is forwarded verbatim to Servo.
                test_args += ["--"] + servo_params
            ret = self.run_test("reftest", test_args, release=release)
            # NOTE(review): ret is None when the reftest binary is missing,
            # which also counts as an error here — confirm that is intended.
            error = error or ret != 0
        elapsed = time() - test_start

        print("Reference tests completed in %0.2fs" % elapsed)

        if error:
            return 1

    @Command('test-content',
             description='Run the content tests',
             category='testing')
    def test_content(self):
        """Legacy entry point kept so `./mach test-content` still works;
        the content tests now live in the web-platform-tests tree."""
        notice = ("Content tests have been replaced by web-platform-tests under "
                  "tests/wpt/mozilla/.")
        print(notice)
        return 0

    @Command('test-tidy',
             description='Run the source code tidiness check',
             category='testing')
    def test_tidy(self):
        """Run the tidy lint over the tree and return its error status."""
        return tidy.scan()

    @Command('test-wpt-failure',
             description='Run the web platform tests',
             category='testing')
    def test_wpt_failure(self):
        """Harness sanity check: run a WPT test that must fail.

        The shell exit status is inverted with `not`, so this command
        reports success only when run.sh exits non-zero — i.e. when the
        intentional failure was correctly detected.
        """
        self.ensure_bootstrapped()
        return not subprocess.call([
            "bash",
            path.join("tests", "wpt", "run.sh"), "--no-pause-after-test",
            "--include", "infrastructure/failing-test.html"
        ],
                                   env=self.build_env())

    @Command('test-wpt',
             description='Run the web platform tests',
             category='testing',
             parser=wptcommandline.create_parser)
    @CommandArgument('--release',
                     default=False,
                     action="store_true",
                     help="Run with a release build of servo")
    def test_wpt(self, **kwargs):
        """Run the web-platform-tests by executing tests/wpt/run_wpt.py
        in-process and invoking its run_tests entry point."""
        # NOTE(review): the parser above is passed as an uncalled factory,
        # while update-wpt calls create_parser(); confirm mach accepts both.
        self.ensure_bootstrapped()
        hosts_file_path = path.join(self.context.topdir, 'tests', 'wpt',
                                    'hosts')

        # The WPT harness reads these settings from the environment.
        os.environ["hosts_file_path"] = hosts_file_path
        os.environ["RUST_BACKTRACE"] = "1"

        # Debug and release builds are mutually exclusive.
        kwargs["debug"] = not kwargs["release"]

        run_file = path.abspath(
            path.join(self.context.topdir, "tests", "wpt", "run_wpt.py"))
        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)
        return run_globals["run_tests"](**kwargs)

    @Command('update-wpt',
             description='Update the web platform tests',
             category='testing',
             parser=updatecommandline.create_parser())
    def update_wpt(self, **kwargs):
        """Update WPT expectation data by executing tests/wpt/update.py
        in-process and invoking its update_tests entry point."""
        self.ensure_bootstrapped()
        update_script = path.abspath(path.join("tests", "wpt", "update.py"))
        script_globals = {"__file__": update_script}
        execfile(update_script, script_globals)
        return script_globals["update_tests"](**kwargs)

    @Command('test-jquery',
             description='Run the jQuery test suite',
             category='testing')
    @CommandArgument('--release',
                     '-r',
                     action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev',
                     '-d',
                     action='store_true',
                     help='Run the dev build')
    def test_jquery(self, release, dev):
        """Run the jQuery suite against Servo via jquery_test_runner."""
        return self.jquery_test_runner("test", release, dev)

    @Command('test-dromaeo',
             description='Run the Dromaeo test suite',
             category='testing')
    @CommandArgument('tests',
                     default=["recommended"],
                     nargs="...",
                     help="Specific tests to run")
    @CommandArgument('--release',
                     '-r',
                     action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev',
                     '-d',
                     action='store_true',
                     help='Run the dev build')
    def test_dromaeo(self, tests, release, dev):
        """Run the Dromaeo benchmark against Servo via dromaeo_test_runner."""
        return self.dromaeo_test_runner(tests, release, dev)

    @Command('update-jquery',
             description='Update the jQuery test suite expected results',
             category='testing')
    @CommandArgument('--release',
                     '-r',
                     action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev',
                     '-d',
                     action='store_true',
                     help='Run the dev build')
    def update_jquery(self, release, dev):
        """Regenerate the jQuery suite's expected results via
        jquery_test_runner's 'update' sub-command."""
        return self.jquery_test_runner("update", release, dev)

    @Command('test-css',
             description='Run the web platform tests',
             category='testing',
             parser=create_parser_wpt)
    def test_css(self, **kwargs):
        """Run the CSS working-group test suite through the WPT harness
        (tests/wpt/run_css.py)."""
        self.ensure_bootstrapped()

        # Execute the runner script in-process and call its entry point.
        run_file = path.abspath(path.join("tests", "wpt", "run_css.py"))
        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)
        return run_globals["run_tests"](**kwargs)

    @Command('update-css',
             description='Update the web platform tests',
             category='testing',
             parser=updatecommandline.create_parser())
    def update_css(self, **kwargs):
        """Update the CSS test expectations by executing
        tests/wpt/update_css.py in-process."""
        self.ensure_bootstrapped()
        update_script = path.abspath(path.join("tests", "wpt", "update_css.py"))
        script_globals = {"__file__": update_script}
        execfile(update_script, script_globals)
        return script_globals["update_tests"](**kwargs)

    @Command('compare_dromaeo',
             description=
             'compare outputs of two runs of ./mach test-dromaeo command',
             category='testing')
    @CommandArgument(
        'params',
        default=None,
        nargs="...",
        help=" filepaths of output files of two runs of dromaeo test ")
    def compare_dromaeo(self, params):
        """Diff two saved `./mach test-dromaeo` logs and print a colorized
        per-test table (red = slower in the second run, green = faster).

        params[0] is the log of the first (baseline) run and params[1] the
        log of the second run.
        """
        prev_op_filename = params[0]
        cur_op_filename = params[1]
        result = {
            'Test': [],
            'Prev_Time': [],
            'Cur_Time': [],
            'Difference(%)': []
        }
        with open(prev_op_filename,
                  'r') as prev_op, open(cur_op_filename, 'r') as cur_op:
            l1 = prev_op.readline()
            l2 = cur_op.readline()

            # Scan forward until either log reaches its '[dromaeo] Saving...'
            # marker. The original looped on `l1.find(...)`, which is -1
            # (truthy) at EOF, so a log missing the marker spun forever;
            # bail out when either file is exhausted instead.
            marker = '[dromaeo] Saving...'
            while (l1 and l2
                   and l1.find(marker) != 0
                   and l2.find(marker) != 0):
                l1 = prev_op.readline()
                l2 = cur_op.readline()

            # Skip the three header lines following the marker.
            for _ in range(3):
                l1 = prev_op.readline()
                l2 = cur_op.readline()

            # Each remaining line is '<test name> | <time>'.
            while True:
                l1 = prev_op.readline()
                l2 = cur_op.readline()
                if not l1 or not l2:
                    # Stop at EOF of either log (the original indexed into
                    # l2 unconditionally and crashed on a shorter file).
                    break
                prev_time = float(l1.split('|')[1].strip())
                cur_time = float(l2.split('|')[1].strip())
                result['Test'].append(l1.split('|')[0].strip())
                result['Prev_Time'].append(prev_time)
                result['Cur_Time'].append(cur_time)
                result['Difference(%)'].append(
                    ((cur_time - prev_time) / prev_time) * 100)

            if not result['Test']:
                # max() below would raise on empty columns; fail clearly.
                print("No comparable test rows found in the given logs.")
                return 1

            width_col1 = max([len(x) for x in result['Test']])
            width_col2 = max([len(str(x)) for x in result['Prev_Time']])
            width_col3 = max([len(str(x)) for x in result['Cur_Time']])
            width_col4 = max([len(str(x)) for x in result['Difference(%)']])

            # Bold header row followed by a separator line.
            header = "{}|{}|{}|{}".format(
                'Test'.ljust(width_col1), 'First Run'.ljust(width_col2),
                'Second Run'.ljust(width_col3),
                'Difference(%)'.ljust(width_col4))
            print(
                "\033[1m" + header + "\033[0m" +
                "\n" +
                "--------------------------------------------------" +
                "-------------------------------------------------------------------------"
            )

            for name, prev_time, cur_time, diff in zip(result['Test'],
                                                       result['Prev_Time'],
                                                       result['Cur_Time'],
                                                       result['Difference(%)']):
                row = "{}|{}|{}|{}".format(name.ljust(width_col1),
                                           str(prev_time).ljust(width_col2),
                                           str(cur_time).ljust(width_col3),
                                           str(diff).ljust(width_col4))
                if diff > 0:
                    # Red: the test got slower.
                    print("\033[91m" + row + "\033[0m")
                elif diff < 0:
                    # Green: the test got faster.
                    print("\033[92m" + row + "\033[0m")
                else:
                    print(row)

    def jquery_test_runner(self, cmd, release, dev):
        """Clone/update the servo jQuery fork and hand off to run_jquery.py.

        *cmd* is the sub-command understood by run_jquery.py ("test" or
        "update"); release/dev select which Servo build to run.
        """
        self.ensure_bootstrapped()
        suite_dir = path.abspath(path.join("tests", "jquery"))
        repo_dir = path.join(suite_dir, "jquery")
        runner = path.join(suite_dir, "run_jquery.py")

        # First run: fetch the servo branch of the jQuery fork.
        if not os.path.isdir(repo_dir):
            subprocess.check_call([
                "git", "clone", "-b", "servo", "--depth", "1",
                "https://github.com/servo/jquery", repo_dir
            ])

        # Later runs: fast-forward to pick up any upstream changes.
        subprocess.check_call(["git", "-C", repo_dir, "pull"])

        # Resolve the Servo binary for the requested build profile.
        servo_binary = path.abspath(self.get_binary_path(release, dev))

        return subprocess.check_call([runner, cmd, servo_binary, suite_dir])

    def dromaeo_test_runner(self, tests, release, dev):
        """Clone/update and build the Dromaeo benchmark, then run the
        selected tests against a Servo build via run_dromaeo.py."""
        self.ensure_bootstrapped()
        suite_dir = path.abspath(path.join("tests", "dromaeo"))
        repo_dir = path.join(suite_dir, "dromaeo")
        runner = path.join(suite_dir, "run_dromaeo.py")

        # First run: fetch the servo branch of the Dromaeo fork.
        if not os.path.isdir(repo_dir):
            subprocess.check_call([
                "git", "clone", "-b", "servo", "--depth", "1",
                "https://github.com/notriddle/dromaeo", repo_dir
            ])

        # Later runs: fast-forward to pick up any upstream changes.
        subprocess.check_call(["git", "-C", repo_dir, "pull"])

        # Build the benchmark's web assets.
        subprocess.check_call(["make", "-C", repo_dir, "web"])

        # Resolve the Servo binary for the requested build profile.
        servo_binary = path.abspath(self.get_binary_path(release, dev))

        # run_dromaeo.py takes the test list as one '|'-joined argument.
        return subprocess.check_call(
            [runner, "|".join(tests), servo_binary, suite_dir])
# --- Example #6 (chunk separator; not part of the module) ---
class MachCommands(CommandBase):
    # Default rendering backend used when no --render-mode is given.
    DEFAULT_RENDER_MODE = "cpu"
    # Shared help text for the render-mode arguments declared below.
    HELP_RENDER_MODE = "Value can be 'cpu', 'gpu' or 'both' (default " + DEFAULT_RENDER_MODE + ")"

    def __init__(self, context):
        """Initialize the command container and the shared flag that tracks
        whether test binaries were already built in this mach invocation."""
        CommandBase.__init__(self, context)
        if not hasattr(self.context, "built_tests"):
            # Stored on the mach context so several commands in a single
            # invocation only trigger the test build once.
            self.context.built_tests = False

    @Command('test',
             description='Run specified Servo tests',
             category='testing')
    @CommandArgument('params', default=None, nargs="...",
                     help="Optionally select test based on "
                          "test file directory")
    @CommandArgument('--render-mode', '-rm', default=DEFAULT_RENDER_MODE,
                     help="The render mode to be used on all tests. "
                          + HELP_RENDER_MODE)
    @CommandArgument('--release', default=False, action="store_true",
                     help="Run with a release build of servo")
    @CommandArgument('--tidy-all', default=False, action="store_true",
                     help="Check all files, and run the WPT lint in tidy, "
                          "even if unchanged")
    @CommandArgument('--no-progress', default=False, action="store_true",
                     help="Don't show progress for tidy")
    @CommandArgument('--self-test', default=False, action="store_true",
                     help="Run unit tests for tidy")
    @CommandArgument('--all', default=False, action="store_true", dest="all_suites",
                     help="Run all test suites")
    def test(self, params, render_mode=DEFAULT_RENDER_MODE, release=False, tidy_all=False,
             no_progress=False, self_test=False, all_suites=False):
        """Dispatch one or more test suites selected by name or by path.

        Each entry in *params* is either a suite name (e.g. "wpt") or a
        path owned by a suite; path arguments are forwarded to the suite
        via its include argument. With no params, --all runs every suite.
        Returns 1 on an unrecognized argument or missing selection.
        """
        suites = copy.deepcopy(TEST_SUITES)
        # Inject the per-invocation options into the suite descriptions.
        suites["tidy"]["kwargs"] = {"all_files": tidy_all, "no_progress": no_progress, "self_test": self_test,
                                    "stylo": False}
        suites["wpt"]["kwargs"] = {"release": release}
        suites["unit"]["kwargs"] = {}

        selected_suites = OrderedDict()

        if params is None:
            if all_suites:
                params = suites.keys()
            else:
                print("Specify a test path or suite name, or pass --all to run all test suites.\n\nAvailable suites:")
                for s in suites:
                    print("    %s" % s)
                return 1

        for arg in params:
            found = False
            if arg in suites and arg not in selected_suites:
                # Argument names a suite directly.
                selected_suites[arg] = []
                found = True
            else:
                suite = self.suite_for_path(arg)
                if suite is not None:
                    if suite not in selected_suites:
                        selected_suites[suite] = []
                    selected_suites[suite].append(arg)
                    found = True
                    # BUG FIX: the original `break` here aborted the whole
                    # params loop, silently dropping every argument after
                    # the first path-based one; keep classifying the
                    # remaining arguments instead.

            if not found:
                print("%s is not a valid test path or suite name" % arg)
                return 1

        test_start = time.time()
        for suite, tests in iteritems(selected_suites):
            props = suites[suite]
            kwargs = props.get("kwargs", {})
            if tests:
                # Restrict the suite to the explicitly requested paths.
                kwargs[props["include_arg"]] = tests

            Registrar.dispatch("test-%s" % suite, context=self.context, **kwargs)

        elapsed = time.time() - test_start

        print("Tests completed in %0.2fs" % elapsed)

    def suite_for_path(self, path_arg):
        """Return the name of the test suite owning *path_arg*, or None if
        the path does not exist or no suite claims it."""
        # Compute the absolute path once; the original called abspath twice.
        abs_path = path.abspath(path_arg)
        if os.path.exists(abs_path):
            for prefix, suite in iteritems(TEST_SUITES_BY_PREFIX):
                if abs_path.startswith(prefix):
                    return suite
        return None

    @Command('test-perf',
             description='Run the page load performance test',
             category='testing')
    @CommandArgument('--base', default=None,
                     help="the base URL for testcases")
    @CommandArgument('--date', default=None,
                     help="the datestamp for the data")
    @CommandArgument('--submit', '-a', default=False, action="store_true",
                     help="submit the data to perfherder")
    def test_perf(self, base=None, date=None, submit=False):
        """Run the CI page-load performance harness, optionally uploading
        the results to perfherder."""
        env = self.build_env()
        cmd = ["bash", "test_perf.sh"]
        # Forward only the options that were actually supplied.
        for flag, value in (("--base", base), ("--date", date)):
            if value:
                cmd += [flag, value]
        if submit:
            cmd += ["--submit"]
        return call(cmd,
                    env=env,
                    cwd=path.join("etc", "ci", "performance"))

    @Command('test-unit',
             description='Run unit tests',
             category='testing')
    @CommandArgument('test_name', nargs=argparse.REMAINDER,
                     help="Only run tests that match this pattern or file path")
    @CommandArgument('--package', '-p', default=None, help="Specific package to test")
    @CommandArgument('--bench', default=False, action="store_true",
                     help="Run in bench mode")
    @CommandArgument('--nocapture', default=False, action="store_true",
                     help="Run tests with nocapture ( show test stdout )")
    @CommandBase.build_like_command_arguments
    def test_unit(self, test_name=None, package=None, bench=False, nocapture=False, with_layout_2020=False, **kwargs):
        """Run the Rust unit tests (or benches) through cargo.

        Each *test_name* entry may be a suite path (tests/unit/<package>),
        a package-qualified test (<package>/<test> or
        tests/unit/<package>/<test>.rs), or a bare test name pattern.
        Returns cargo's error status on failure.
        """
        if test_name is None:
            test_name = []

        self.ensure_bootstrapped()

        if package:
            packages = {package}
        else:
            packages = set()

        test_patterns = []
        for test in test_name:
            # add package if 'tests/unit/<package>'
            match = re.search("tests/unit/(\\w+)/?$", test)
            if match:
                packages.add(match.group(1))
            # add package & test if '<package>/<test>', 'tests/unit/<package>/<test>.rs', or similar
            elif re.search("\\w/\\w", test):
                tokens = test.split("/")
                packages.add(tokens[-2])
                test_prefix = tokens[-1]
                if test_prefix.endswith(".rs"):
                    test_prefix = test_prefix[:-3]
                test_prefix += "::"
                test_patterns.append(test_prefix)
            # add test as-is otherwise
            else:
                test_patterns.append(test)

        # Crates whose tests live in the crate itself (passed to cargo as
        # '-p <crate>') rather than in a separate '<crate>_tests' crate.
        self_contained_tests = [
            "background_hang_monitor",
            "gfx",
            "msg",
            "net",
            "net_traits",
            "selectors",
            "servo_config",
            "servo_remutex",
        ]
        if with_layout_2020:
            self_contained_tests.append("layout_2020")
        else:
            self_contained_tests.append("layout_2013")
        if not packages:
            # Default to every package under tests/unit (minus macOS noise).
            packages = set(os.listdir(path.join(self.context.topdir, "tests", "unit"))) - set(['.DS_Store'])
            packages |= set(self_contained_tests)

        # Split the selection into external '<crate>_tests' crates and
        # self-contained crates.
        in_crate_packages = []
        for crate in self_contained_tests:
            try:
                packages.remove(crate)
                in_crate_packages += [crate]
            except KeyError:
                pass

        packages.discard('stylo')

        env = self.build_env(test_unit=True)
        # FIXME: https://github.com/servo/servo/issues/26192
        if "apple-darwin" not in host_triple():
            env["RUST_BACKTRACE"] = "1"

        if "msvc" in host_triple():
            # on MSVC, we need some DLLs in the path. They were copied
            # in to the servo.exe build dir, so just point PATH to that.
            env["PATH"] = "%s%s%s" % (path.dirname(self.get_binary_path(False, False)), os.pathsep, env["PATH"])

        if len(packages) > 0 or len(in_crate_packages) > 0:
            args = []
            for crate in packages:
                args += ["-p", "%s_tests" % crate]
            for crate in in_crate_packages:
                args += ["-p", crate]
            args += test_patterns

            if nocapture:
                # Everything after '--' goes to the test binary itself.
                args += ["--", "--nocapture"]

            err = self.run_cargo_build_like_command("bench" if bench else "test",
                                                    args,
                                                    env=env,
                                                    with_layout_2020=with_layout_2020,
                                                    **kwargs)
            if err:
                return err

    @Command('test-content',
             description='Run the content tests',
             category='testing')
    def test_content(self):
        """Legacy entry point; content tests now live in the WPT tree."""
        message = ("Content tests have been replaced by web-platform-tests under "
                   "tests/wpt/mozilla/.")
        print(message)
        return 0

    def install_rustfmt(self):
        """Install the rustfmt rustup component if `cargo fmt` is not yet
        available."""
        self.ensure_bootstrapped()
        with open(os.devnull, "w") as devnull:
            # Probe quietly; a non-zero status means rustfmt is missing.
            probe_status = self.call_rustup_run(["cargo", "fmt", "--version", "-q"],
                                                stderr=devnull)
            if probe_status != 0:
                self.call_rustup_run(["rustup", "component", "add", "rustfmt-preview"])

    @Command('test-tidy',
             description='Run the source code tidiness check',
             category='testing')
    @CommandArgument('--all', default=False, action="store_true", dest="all_files",
                     help="Check all files, and run the WPT lint in tidy, "
                          "even if unchanged")
    @CommandArgument('--no-wpt', default=False, action="store_true", dest="no_wpt",
                     help="Skip checking that web-platform-tests manifests are up to date")
    @CommandArgument('--no-progress', default=False, action="store_true",
                     help="Don't show progress for tidy")
    @CommandArgument('--self-test', default=False, action="store_true",
                     help="Run unit tests for tidy")
    @CommandArgument('--stylo', default=False, action="store_true",
                     help="Only handle files in the stylo tree")
    @CommandArgument('--force-cpp', default=False, action="store_true", help="Force CPP check")
    def test_tidy(self, all_files, no_progress, self_test, stylo, force_cpp=False, no_wpt=False):
        """Run the tidy lint plus rustfmt/clang-format checks.

        A truthy return value means at least one check failed.
        """
        if self_test:
            # --self-test runs tidy's own unit tests instead of linting.
            return test_tidy.do_tests()
        else:
            if no_wpt:
                manifest_dirty = False
            else:
                # check_clean=True only reports whether the WPT manifest
                # needs regenerating.
                manifest_dirty = run_update(self.context.topdir, check_clean=True)
            tidy_failed = tidy.scan(not all_files, not no_progress, stylo=stylo, no_wpt=no_wpt)
            self.install_rustfmt()
            rustfmt_failed = self.call_rustup_run(["cargo", "fmt", "--", "--check"])

            env = self.build_env()
            clangfmt_failed = False
            available, cmd, files = setup_clangfmt(env)
            if available:
                for file in files:
                    # clang-format emits replacement XML; any child elements
                    # mean the file is not formatted correctly.
                    stdout = check_output([cmd, "-output-replacements-xml", file], env=env)
                    if len(XML(stdout)) > 0:
                        print("%s is not formatted correctly." % file)
                        clangfmt_failed = True
            elif force_cpp:
                print("Error: can't find suitable clang-format version. Required with --force-cpp.")
                return True

            if rustfmt_failed or clangfmt_failed:
                print("Run `./mach fmt` to fix the formatting")

            return tidy_failed or manifest_dirty or rustfmt_failed or clangfmt_failed

    @Command('test-webidl',
             description='Run the WebIDL parser tests',
             category='testing')
    @CommandArgument('--quiet', '-q', default=False, action="store_true",
                     help="Don't print passing tests.")
    @CommandArgument('tests', default=None, nargs="...",
                     help="Specific tests to run, relative to the tests directory")
    def test_webidl(self, quiet, tests):
        """Execute the WebIDL parser's own test harness (runtests.py)."""
        parser_dir = path.abspath(path.join(PROJECT_TOPLEVEL_PATH, "components", "script",
                                            "dom", "bindings", "codegen", "parser"))
        # runtests.py does `import WebIDL`, so its directory must be
        # importable first.
        sys.path.insert(0, parser_dir)

        runner_path = path.abspath(path.join(parser_dir, "runtests.py"))
        runner_globals = {"__file__": runner_path}
        exec(compile(open(runner_path).read(), runner_path, 'exec'), runner_globals)

        return runner_globals["run_tests"](tests, not quiet)

    @Command('test-wpt-failure',
             description='Run the tests harness that verifies that the test failures are reported correctly',
             category='testing',
             parser=create_parser_wpt)
    def test_wpt_failure(self, **kwargs):
        """Sanity-check the harness by running a deliberately failing test;
        succeed only if the failure is actually reported."""
        kwargs.update(pause_after_test=False,
                      include=["infrastructure/failing-test.html"])
        # Invert: the harness reporting a failure is the expected outcome.
        return not self._test_wpt(**kwargs)

    @Command('test-wpt',
             description='Run the regular web platform test suite',
             category='testing',
             parser=create_parser_wpt)
    def test_wpt(self, **kwargs):
        """Run the regular web platform test suite, delegating to other
        suites for any explicitly listed paths that do not belong to WPT."""
        outcome = self.run_test_list_or_dispatch(kwargs["test_list"], "wpt", self._test_wpt, **kwargs)
        # --always-succeed forces a zero exit code regardless of results.
        return 0 if kwargs["always_succeed"] else outcome

    @Command('test-wpt-android',
             description='Run the web platform test suite in an Android emulator',
             category='testing',
             parser=create_parser_wpt)
    def test_wpt_android(self, release=False, dev=False, binary_args=None, **kwargs):
        """Run WPT inside an Android emulator, driving Servo via servodriver."""
        # The emulator wrapper script is executed through the current Python,
        # with any caller-supplied binary args appended.
        wrapper_args = self.in_android_emulator(release, dev) + (binary_args or [])
        kwargs["release"] = release
        kwargs["dev"] = dev
        kwargs["product"] = "servodriver"
        kwargs["processes"] = 1
        kwargs["binary_args"] = wrapper_args
        kwargs["binary"] = sys.executable
        return self._test_wpt(android=True, **kwargs)

    def _test_wpt(self, android=False, **kwargs):
        """Shared driver for the WPT commands: set up the run environment,
        point the harness at our hosts file, and hand off to run.py."""
        self.set_run_env(android)
        os.environ["HOST_FILE"] = path.join(self.context.topdir, 'tests', 'wpt', 'hosts')
        runner_path = path.abspath(path.join(self.context.topdir, "tests", "wpt", "run.py"))
        return self.wptrunner(runner_path, **kwargs)

    # Helper to ensure all specified paths are handled, otherwise dispatch to appropriate test suite.
    def run_test_list_or_dispatch(self, requested_paths, correct_suite, correct_function, **kwargs):
        """Run `correct_function` when every requested path belongs to
        `correct_suite`; otherwise re-dispatch through the generic `test`
        command so each path reaches its proper suite.

        Returns the result of whichever function or dispatch was invoked.
        """
        if not requested_paths:
            return correct_function(**kwargs)
        # Paths specified on command line. Ensure they can be handled, re-dispatch otherwise.
        all_handled = True
        for test_path in requested_paths:
            suite = self.suite_for_path(test_path)
            if suite is not None and correct_suite != suite:
                all_handled = False
                print("Warning: %s is not a %s test. Delegating to test-%s." % (test_path, correct_suite, suite))
        if all_handled:
            return correct_function(**kwargs)
        # Dispatch each test to the correct suite via test(). Previously the
        # dispatch result was dropped, so callers (e.g. test_wpt) treated a
        # failing delegated run as success; propagate it instead.
        return Registrar.dispatch("test", context=self.context, params=requested_paths)

    # Helper for test_css and test_wpt:
    def wptrunner(self, run_file, **kwargs):
        """Shared entry point for the WPT-based suites: normalise the parsed
        options in place, then exec `run_file` and invoke its `run_tests`.

        Mutates os.environ and `kwargs`; returns whatever `run_tests` returns.
        """
        # By default, Rayon selects the number of worker threads
        # based on the available CPU count. This doesn't work very
        # well when running tests on CI, since we run so many
        # Servo processes in parallel. The result is a lot of
        # extra timeouts. Instead, force Rayon to assume we are
        # running on a 2 CPU environment.
        os.environ['RAYON_RS_NUM_CPUS'] = "2"

        os.environ["RUST_BACKTRACE"] = "1"
        kwargs["debug"] = not kwargs["release"]
        # rr chaos mode: record under randomised scheduling and loop until an
        # unexpected result appears.
        if kwargs.pop("rr_chaos"):
            kwargs["debugger"] = "rr"
            kwargs["debugger_args"] = "record --chaos"
            kwargs["repeat_until_unexpected"] = True
            # TODO: Delete rr traces from green test runs?
        prefs = kwargs.pop("prefs")
        if prefs:
            # Forward each pref to the Servo binary as a --pref flag.
            binary_args = []
            for pref in prefs:
                binary_args.append("--pref=" + pref)
            kwargs["binary_args"] = binary_args

        if not kwargs.get('no_default_test_types'):
            # Each product runs a fixed default set of test types.
            test_types = {
                "servo": ["testharness", "reftest", "wdspec"],
                "servodriver": ["testharness", "reftest"],
            }
            product = kwargs.get("product") or "servo"
            kwargs["test_types"] = test_types[product]

        run_globals = {"__file__": run_file}
        exec(compile(open(run_file).read(), run_file, 'exec'), run_globals)
        return run_globals["run_tests"](**kwargs)

    @Command('update-manifest',
             description='Run test-wpt --manifest-update SKIP_TESTS to regenerate MANIFEST.json',
             category='testing',
             parser=create_parser_manifest_update)
    def update_manifest(self, **kwargs):
        """Regenerate the WPT MANIFEST.json, forwarding all parsed
        command-line options to the manifest updater."""
        return run_update(self.context.topdir, **kwargs)

    @Command('fmt',
             description='Format the Rust and CPP source files with rustfmt and clang-format',
             category='testing')
    def format_code(self):
        """Format C++ sources with clang-format and Rust sources with rustfmt."""
        env = self.build_env()
        available, cmd, files = setup_clangfmt(env)
        # Only invoke clang-format when it exists and there is work to do.
        if available and files:
            check_call([cmd, "-i"] + files, env=env)

        self.install_rustfmt()
        return self.call_rustup_run(["cargo", "fmt"])

    @Command('update-wpt',
             description='Update the web platform tests',
             category='testing',
             parser=updatecommandline.create_parser())
    def update_wpt(self, **kwargs):
        """Sync the web platform tests, refusing a --sync run without --patch."""
        updater_path = path.abspath(path.join("tests", "wpt", "update.py"))

        # Syncing without producing a patch is almost always a mistake.
        if kwargs["sync"] and not kwargs.get("patch", False):
            print("Are you sure you don't want a patch?")
            return 1

        updater_globals = {"__file__": updater_path}
        exec(compile(open(updater_path).read(), updater_path, 'exec'), updater_globals)
        return updater_globals["update_tests"](**kwargs)

    @Command('filter-intermittents',
             description='Given a WPT error summary file, filter out intermittents and other cruft.',
             category='testing')
    @CommandArgument('summary',
                     help="Error summary log to take in")
    @CommandArgument('--log-filteredsummary', default=None,
                     help='Print filtered log to file')
    @CommandArgument('--log-intermittents', default=None,
                     help='Print intermittents to file')
    @CommandArgument('--auth', default=None,
                     help='File containing basic authorization credentials for Github API (format `username:password`)')
    @CommandArgument('--tracker-api', default=None, action='store',
                     help='The API endpoint for tracking known intermittent failures.')
    @CommandArgument('--reporter-api', default=None, action='store',
                     help='The API endpoint for reporting tracked intermittent failures.')
    def filter_intermittents(self, summary, log_filteredsummary, log_intermittents, auth, tracker_api, reporter_api):
        """Split a WPT error-summary log into known intermittents and genuine failures.

        Each line of `summary` is a JSON record. Every failure is looked up
        either in the intermittent tracker (`tracker_api`) or, failing that,
        among GitHub's open I-intermittent issues. The two partitions are
        optionally written to `log_intermittents` / `log_filteredsummary`.

        Returns True when any non-intermittent failure remains.
        """
        encoded_auth = None
        if auth:
            with open(auth, "r") as auth_file:
                # base64.encodestring() was removed in Python 3.9 and operated
                # on bytes; b64encode with explicit encode/decode yields the
                # same header value (and no trailing newline to strip).
                credentials = auth_file.read().strip()
                encoded_auth = base64.b64encode(credentials.encode("utf-8")).decode("utf-8")
        failures = []
        with open(summary, "r") as summary_file:
            failures = [json.loads(line) for line in summary_file]
        actual_failures = []
        intermittents = []
        for failure in failures:
            if tracker_api:
                if tracker_api == 'default':
                    tracker_api = "https://build.servo.org/intermittent-tracker"
                elif tracker_api.endswith('/'):
                    tracker_api = tracker_api[0:-1]

                # Records without a test name cannot be queried.
                if 'test' not in failure:
                    continue
                query = urllib.parse.quote(failure['test'], safe='')
                request = urllib.request.Request("%s/query.py?name=%s" % (tracker_api, query))
                search = urllib.request.urlopen(request)
                data = json.load(search)
                is_intermittent = len(data) > 0
            else:
                qstr = "repo:servo/servo+label:I-intermittent+type:issue+state:open+%s" % failure['test']
                # we want `/` to get quoted, but not `+` (github's API doesn't like that), so we set `safe` to `+`
                query = urllib.parse.quote(qstr, safe='+')
                request = urllib.request.Request("https://api.github.com/search/issues?q=%s" % query)
                if encoded_auth:
                    request.add_header("Authorization", "Basic %s" % encoded_auth)
                search = urllib.request.urlopen(request)
                data = json.load(search)
                is_intermittent = data['total_count'] > 0

            # Both branches format a record identically; only the bucket differs.
            bucket = intermittents if is_intermittent else actual_failures
            if 'output' in failure:
                bucket.append(failure["output"])
            else:
                bucket.append("%s [expected %s] %s \n"
                              % (failure["status"], failure["expected"], failure['test']))

        # Renamed from `format` to avoid shadowing the builtin.
        def write_results(outputs, description, file=sys.stdout):
            formatted = "%s %s:\n%s" % (len(outputs), description, "\n".join(outputs))
            if file == sys.stdout:
                file.write(formatted)
            else:
                # The log files below are opened in binary mode.
                file.write(formatted.encode("utf-8"))

        if log_intermittents:
            with open(log_intermittents, "wb") as outfile:
                write_results(intermittents, "known-intermittent unexpected results", outfile)

        description = "unexpected results that are NOT known-intermittents"
        if log_filteredsummary:
            with open(log_filteredsummary, "wb") as outfile:
                write_results(actual_failures, description, outfile)

        if actual_failures:
            write_results(actual_failures, description)

        return bool(actual_failures)

    @Command('test-android-startup',
             description='Extremely minimal testing of Servo for Android',
             category='testing')
    @CommandArgument('--release', '-r', action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev', '-d', action='store_true',
                     help='Run the dev build')
    def test_android_startup(self, release, dev):
        """Smoke-test Servo on Android: load a data: URL in the emulator and
        wait for its JavaScript alert to appear on stdout.

        Returns 1 on EOF before the expected line; always terminates the
        emulator process.
        """
        html = """
            <script>
                window.alert("JavaScript is running!")
            </script>
        """
        # str.encode("base64") is a Python 2 codec that no longer exists;
        # use the base64 module (b64encode emits no newlines to strip).
        encoded = base64.b64encode(html.encode("utf-8")).decode("utf-8")
        url = "data:text/html;base64," + encoded
        args = self.in_android_emulator(release, dev)
        args = [sys.executable] + args + [url]
        process = subprocess.Popen(args, stdout=subprocess.PIPE)
        try:
            while 1:
                line = process.stdout.readline()
                if len(line) == 0:
                    print("EOF without finding the expected line")
                    return 1
                # Popen without text mode yields bytes; decode before the
                # substring check below, which would otherwise raise TypeError.
                line = line.decode("utf-8", "replace")
                print(line.rstrip())
                if "JavaScript is running!" in line:
                    break
        finally:
            process.terminate()

    def in_android_emulator(self, release, dev):
        """Return the argv prefix (script, AVD name, apk) that runs Servo
        inside the headless Android emulator helper."""
        # Exactly one of the two build profiles must be selected.
        if bool(release) == bool(dev):
            print("Please specify one of --dev or --release.")
            sys.exit(1)

        avd = "servo-x86"
        target = "i686-linux-android"
        print("Assuming --target " + target)

        target_env = self.build_env(target=target)
        os.environ["PATH"] = target_env["PATH"]
        assert self.handle_android_target(target)
        apk = self.get_apk_path(release)

        runner_script = path.join(self.context.topdir, "etc", "run_in_headless_android_emulator.py")
        return [runner_script, avd, apk]

    @Command('test-jquery',
             description='Run the jQuery test suite',
             category='testing')
    @CommandArgument('--release', '-r', action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev', '-d', action='store_true',
                     help='Run the dev build')
    def test_jquery(self, release, dev):
        """Run the jQuery suite against the selected (release or dev) build."""
        return self.jquery_test_runner("test", release, dev)

    @Command('test-dromaeo',
             description='Run the Dromaeo test suite',
             category='testing')
    @CommandArgument('tests', default=["recommended"], nargs="...",
                     help="Specific tests to run")
    @CommandArgument('--release', '-r', action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev', '-d', action='store_true',
                     help='Run the dev build')
    def test_dromaeo(self, tests, release, dev):
        """Run the requested Dromaeo benchmarks against the selected build."""
        return self.dromaeo_test_runner(tests, release, dev)

    @Command('update-jquery',
             description='Update the jQuery test suite expected results',
             category='testing')
    @CommandArgument('--release', '-r', action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev', '-d', action='store_true',
                     help='Run the dev build')
    def update_jquery(self, release, dev):
        """Regenerate the jQuery suite's expected results with the selected build."""
        return self.jquery_test_runner("update", release, dev)

    @Command('compare_dromaeo',
             description='Compare outputs of two runs of ./mach test-dromaeo command',
             category='testing')
    @CommandArgument('params', default=None, nargs="...",
                     help=" filepaths of output files of two runs of dromaeo test ")
    def compare_dromaeo(self, params):
        """Print a colourised table comparing two saved Dromaeo runs.

        `params` holds two file paths: the output of an earlier run and of a
        later run. Rows print red when the second run is slower, green when
        faster, plain when identical.
        """
        prev_op_filename = params[0]
        cur_op_filename = params[1]
        result = {'Test': [], 'Prev_Time': [], 'Cur_Time': [], 'Difference(%)': []}
        with open(prev_op_filename, 'r') as prev_op, open(cur_op_filename, 'r') as cur_op:
            l1 = prev_op.readline()
            l2 = cur_op.readline()

            # Skip ahead until both lines START with '[dromaeo] Saving...'
            # (str.find returns 0, i.e. falsy, exactly then).
            # NOTE(review): if a file lacks the marker, readline() returns ''
            # forever and ''.find(...) is -1 (truthy), so this loops without
            # end — confirm inputs always contain the marker.
            while ((l1.find('[dromaeo] Saving...') and l2.find('[dromaeo] Saving...'))):
                l1 = prev_op.readline()
                l2 = cur_op.readline()

            # Skip the three header lines following the marker.
            reach = 3
            while (reach > 0):
                l1 = prev_op.readline()
                l2 = cur_op.readline()
                reach -= 1

            # Parse "name | time" rows until the first file is exhausted.
            # NOTE(review): assumes both files list the same tests in the
            # same order, and that no previous time is 0 (division below).
            while True:
                l1 = prev_op.readline()
                l2 = cur_op.readline()
                if not l1:
                    break
                result['Test'].append(str(l1).split('|')[0].strip())
                result['Prev_Time'].append(float(str(l1).split('|')[1].strip()))
                result['Cur_Time'].append(float(str(l2).split('|')[1].strip()))
                a = float(str(l1).split('|')[1].strip())
                b = float(str(l2).split('|')[1].strip())
                result['Difference(%)'].append(((b - a) / a) * 100)

            # Column widths sized to the longest cell in each column.
            width_col1 = max([len(x) for x in result['Test']])
            width_col2 = max([len(str(x)) for x in result['Prev_Time']])
            width_col3 = max([len(str(x)) for x in result['Cur_Time']])
            width_col4 = max([len(str(x)) for x in result['Difference(%)']])

            # Bold header row followed by a separator line.
            for p, q, r, s in zip(['Test'], ['First Run'], ['Second Run'], ['Difference(%)']):
                print("\033[1m" + "{}|{}|{}|{}".format(p.ljust(width_col1), q.ljust(width_col2), r.ljust(width_col3),
                      s.ljust(width_col4)) + "\033[0m" + "\n" + "--------------------------------------------------"
                      + "-------------------------------------------------------------------------")

            # Red = regression (slower), green = improvement (faster).
            for a1, b1, c1, d1 in zip(result['Test'], result['Prev_Time'], result['Cur_Time'], result['Difference(%)']):
                if d1 > 0:
                    print("\033[91m" + "{}|{}|{}|{}".format(a1.ljust(width_col1),
                          str(b1).ljust(width_col2), str(c1).ljust(width_col3), str(d1).ljust(width_col4)) + "\033[0m")
                elif d1 < 0:
                    print("\033[92m" + "{}|{}|{}|{}".format(a1.ljust(width_col1),
                          str(b1).ljust(width_col2), str(c1).ljust(width_col3), str(d1).ljust(width_col4)) + "\033[0m")
                else:
                    print("{}|{}|{}|{}".format(a1.ljust(width_col1), str(b1).ljust(width_col2),
                          str(c1).ljust(width_col3), str(d1).ljust(width_col4)))

    def jquery_test_runner(self, cmd, release, dev):
        """Clone/refresh the servo branch of jQuery, then run its harness
        with sub-command `cmd` against the chosen Servo binary."""
        suite_dir = path.abspath(path.join("tests", "jquery"))
        checkout_dir = path.join(suite_dir, "jquery")
        harness = path.join(suite_dir, "run_jquery.py")

        # Clone the jQuery repository if it doesn't exist
        if not os.path.isdir(checkout_dir):
            check_call(["git", "clone", "-b", "servo", "--depth", "1",
                        "https://github.com/servo/jquery", checkout_dir])

        # Run pull in case the jQuery repo was updated since last test run
        check_call(["git", "-C", checkout_dir, "pull"])

        # Check that a release servo build exists
        bin_path = path.abspath(self.get_binary_path(release, dev))

        return call([harness, cmd, bin_path, suite_dir])

    def dromaeo_test_runner(self, tests, release, dev):
        """Clone/refresh and build the Dromaeo suite, then run the selected
        tests (joined with '|') against the chosen Servo binary."""
        suite_dir = path.abspath(path.join("tests", "dromaeo"))
        checkout_dir = path.join(suite_dir, "dromaeo")
        harness = path.join(suite_dir, "run_dromaeo.py")

        # Clone the Dromaeo repository if it doesn't exist
        if not os.path.isdir(checkout_dir):
            check_call(["git", "clone", "-b", "servo", "--depth", "1",
                        "https://github.com/notriddle/dromaeo", checkout_dir])

        # Run pull in case the Dromaeo repo was updated since last test run
        check_call(["git", "-C", checkout_dir, "pull"])

        # Compile test suite
        check_call(["make", "-C", checkout_dir, "web"])

        # Check that a release servo build exists
        bin_path = path.abspath(self.get_binary_path(release, dev))

        return check_call([harness, "|".join(tests), bin_path, suite_dir])
Example #7
0
class MachCommands(CommandBase):
    DEFAULT_RENDER_MODE = "cpu"
    HELP_RENDER_MODE = "Value can be 'cpu', 'gpu' or 'both' (default " + DEFAULT_RENDER_MODE + ")"

    def __init__(self, context):
        """Initialise the command class and the shared built-tests flag."""
        CommandBase.__init__(self, context)
        # Track on the shared context whether `build-tests` has already run,
        # so repeated test commands skip the rebuild.
        if not hasattr(self.context, "built_tests"):
            self.context.built_tests = False

    def ensure_built_tests(self):
        """Build the test binaries once per mach session, exiting on failure."""
        if not self.context.built_tests:
            returncode = Registrar.dispatch('build-tests', context=self.context)
            # A failed build aborts the whole mach invocation.
            if returncode:
                sys.exit(returncode)
            self.context.built_tests = True

    def find_test(self, prefix):
        """Return the path of the executable test binary named `prefix-*`
        in the debug target directory, or None when absent."""
        debug_dir = path.join(self.get_target_dir(), "debug")
        for entry in os.listdir(debug_dir):
            if not entry.startswith(prefix + "-"):
                continue
            candidate = path.join(debug_dir, entry)
            # Skip non-files and non-executables (e.g. build artefacts).
            if path.isfile(candidate) and os.access(candidate, os.X_OK):
                return candidate

    def run_test(self, prefix, args=None):
        """Locate the `prefix-*` test binary and execute it with `args`.

        Returns the subprocess exit code, or None when no binary was found.
        """
        # `args=[]` was a shared mutable default; use None as the sentinel.
        t = self.find_test(prefix)
        if t:
            return subprocess.call([t] + (args or []), env=self.build_env())

    @Command('test', description='Run all Servo tests', category='testing')
    @CommandArgument('params',
                     default=None,
                     nargs="...",
                     help="Optionally select test based on "
                     "test file directory")
    @CommandArgument('--render-mode',
                     '-rm',
                     default=DEFAULT_RENDER_MODE,
                     help="The render mode to be used on all tests. " +
                     HELP_RENDER_MODE)
    @CommandArgument('--release',
                     default=False,
                     action="store_true",
                     help="Run with a release build of servo")
    def test(self, params, render_mode=DEFAULT_RENDER_MODE, release=False):
        """Run the selected test suites (all of them when `params` is empty).

        Each entry in `params` may be a suite name or a path inside a suite's
        directory; paths are forwarded to the suite via its include argument.
        Returns 1 on an unrecognised argument, otherwise prints total time.
        """
        suites = OrderedDict([
            ("tidy", {}),
            ("ref", {
                "kwargs": {
                    "kind": render_mode
                },
                "path": path.abspath(path.join("tests", "ref")),
                "include_arg": "name"
            }),
            ("wpt", {
                "kwargs": {
                    "release": release
                },
                "path":
                path.abspath(path.join("tests", "wpt", "web-platform-tests")),
                "include_arg":
                "include"
            }),
            ("css", {
                "kwargs": {
                    "release": release
                },
                "path": path.abspath(path.join("tests", "wpt", "css-tests")),
                "include_arg": "include"
            }),
            ("unit", {}),
        ])

        # Map each suite's directory prefix back to its name.
        # dict.iteritems() is Python 2 only; items() works on both versions.
        suites_by_prefix = {
            v["path"]: k
            for k, v in suites.items() if "path" in v
        }

        selected_suites = OrderedDict()

        if params is None:
            params = suites.keys()

        for arg in params:
            found = False
            if arg in suites and arg not in selected_suites:
                selected_suites[arg] = []
                found = True

            elif os.path.exists(path.abspath(arg)):
                # A filesystem path selects the suite whose directory contains it.
                abs_path = path.abspath(arg)
                for prefix, suite in suites_by_prefix.items():
                    if abs_path.startswith(prefix):
                        if suite not in selected_suites:
                            selected_suites[suite] = []
                        selected_suites[suite].append(arg)
                        found = True
                        break

            if not found:
                print("%s is not a valid test path or suite name" % arg)
                return 1

        test_start = time()
        for suite, tests in selected_suites.items():
            props = suites[suite]
            kwargs = props.get("kwargs", {})
            if tests:
                kwargs[props["include_arg"]] = tests

            Registrar.dispatch("test-%s" % suite,
                               context=self.context,
                               **kwargs)

        elapsed = time() - test_start

        print("Tests completed in %0.2fs" % elapsed)

    @Command('test-unit', description='Run unit tests', category='testing')
    @CommandArgument('--package',
                     '-p',
                     default=None,
                     help="Specific package to test")
    @CommandArgument('test_name',
                     nargs=argparse.REMAINDER,
                     help="Only run tests that match this pattern")
    def test_unit(self, test_name=None, package=None):
        """Run `cargo test` for one package, or for every crate under
        tests/unit; stops at the first non-zero cargo exit code."""
        test_patterns = test_name if test_name is not None else []

        self.ensure_bootstrapped()

        # Without an explicit package, test every crate in tests/unit.
        packages = [package] if package else os.listdir(
            path.join(self.context.topdir, "tests", "unit"))

        for crate in packages:
            result = subprocess.call(
                ["cargo", "test", "-p", "%s_tests" % crate] + test_patterns,
                env=self.build_env(),
                cwd=self.servo_crate())
            if result != 0:
                return result

    @Command('test-ref',
             description='Run the reference tests',
             category='testing')
    @CommandArgument('--kind',
                     '-k',
                     default=DEFAULT_RENDER_MODE,
                     help=HELP_RENDER_MODE)
    @CommandArgument('--name',
                     default=None,
                     help="Only run tests that match this pattern. If the "
                     "path to the ref test directory is included, it "
                     "will automatically be trimmed out.")
    @CommandArgument(
        'servo_params',
        default=None,
        nargs=argparse.REMAINDER,
        help="Command-line arguments to be passed through to Servo")
    def test_ref(self, kind=DEFAULT_RENDER_MODE, name=None, servo_params=None):
        """Run the reference tests for the requested render kind(s).

        `kind` is 'cpu', 'gpu' or 'both'; `name` filters tests (a leading
        tests/ref prefix is trimmed); `servo_params` is forwarded to Servo
        after `--`. Returns 1 when any pass failed.
        """
        self.ensure_bootstrapped()
        self.ensure_built_tests()
        assert kind is not None, 'kind cannot be None, see help'

        # 'both' expands to one pass per render mode.
        kinds = ["cpu", "gpu"] if kind == 'both' else [kind]
        test_path = path.join(self.context.topdir, "tests", "ref")
        error = False

        test_start = time()
        for k in kinds:
            print("Running %s reftests..." % k)
            test_args = [k, test_path]
            if name is not None:
                maybe_path = path.normpath(name)
                ref_path = path.join("tests", "ref")

                # Check to see if we were passed something leading with the
                # path to the ref test directory, and trim it so that reftest
                # knows how to filter it.
                if ref_path in maybe_path:
                    test_args.append(path.relpath(maybe_path, ref_path))
                else:
                    test_args.append(name)
            if servo_params is not None:
                test_args += ["--"] + servo_params
            ret = self.run_test("reftest", test_args)
            # Any non-zero (or None, for a missing binary) result marks failure.
            error = error or ret != 0
        elapsed = time() - test_start

        print("Reference tests completed in %0.2fs" % elapsed)

        if error:
            return 1

    @Command('test-content',
             description='Run the content tests',
             category='testing')
    def test_content(self):
        """Stub kept for backwards compatibility; content tests were removed."""
        notice = ("Content tests have been replaced by web-platform-tests under "
                  "tests/wpt/mozilla/.")
        print(notice)
        return 0

    @Command('test-tidy',
             description='Run the source code tidiness check',
             category='testing')
    def test_tidy(self):
        """Run the source-code tidiness lint over the default file set."""
        return tidy.scan()

    @Command('test-wpt-failure',
             description='Run the web platform tests',
             category='testing')
    def test_wpt_failure(self):
        """Verify harness failure reporting by running a known-failing test."""
        self.ensure_bootstrapped()
        command = [
            "bash",
            path.join("tests", "wpt", "run.sh"), "--no-pause-after-test",
            "--include", "infrastructure/failing-test.html"
        ]
        # A non-zero exit (the failure being reported) is the passing outcome.
        return not subprocess.call(command, env=self.build_env())

    @Command('test-wpt',
             description='Run the web platform tests',
             category='testing',
             parser=wptcommandline.create_parser)
    @CommandArgument('--release',
                     default=False,
                     action="store_true",
                     help="Run with a release build of servo")
    def test_wpt(self, **kwargs):
        """Run the web platform tests through tests/wpt/run_wpt.py."""
        self.ensure_bootstrapped()
        self.ensure_wpt_virtualenv()
        hosts_file_path = path.join('tests', 'wpt', 'hosts')

        os.environ["hosts_file_path"] = hosts_file_path

        run_file = path.abspath(path.join("tests", "wpt", "run_wpt.py"))
        run_globals = {"__file__": run_file}
        # execfile() was removed in Python 3; exec the compiled source instead.
        exec(compile(open(run_file).read(), run_file, 'exec'), run_globals)
        return run_globals["run_tests"](**kwargs)

    @Command('update-wpt',
             description='Update the web platform tests',
             category='testing',
             parser=updatecommandline.create_parser())
    def update_wpt(self, **kwargs):
        """Sync the web platform tests via tests/wpt/update.py."""
        self.ensure_bootstrapped()
        self.ensure_wpt_virtualenv()
        run_file = path.abspath(path.join("tests", "wpt", "update.py"))
        run_globals = {"__file__": run_file}
        # execfile() was removed in Python 3; exec the compiled source instead.
        exec(compile(open(run_file).read(), run_file, 'exec'), run_globals)
        return run_globals["update_tests"](**kwargs)

    @Command('test-jquery',
             description='Run the jQuery test suite',
             category='testing')
    @CommandArgument('--release',
                     '-r',
                     action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev',
                     '-d',
                     action='store_true',
                     help='Run the dev build')
    def test_jquery(self, release, dev):
        """Run the jQuery suite against the selected (release or dev) build."""
        return self.jquery_test_runner("test", release, dev)

    @Command('update-jquery',
             description='Update the jQuery test suite expected results',
             category='testing')
    @CommandArgument('--release',
                     '-r',
                     action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev',
                     '-d',
                     action='store_true',
                     help='Run the dev build')
    def update_jquery(self, release, dev):
        """Refresh the jQuery suite's expected results using the selected build."""
        return self.jquery_test_runner("update", release, dev)

    @Command('test-css',
             description='Run the web platform tests',
             category='testing',
             parser=wptcommandline.create_parser())
    @CommandArgument('--release',
                     default=False,
                     action="store_true",
                     help="Run with a release build of servo")
    def test_css(self, **kwargs):
        """Run the CSS web-platform-tests through tests/wpt/run_css.py."""
        self.ensure_bootstrapped()
        self.ensure_wpt_virtualenv()

        run_file = path.abspath(path.join("tests", "wpt", "run_css.py"))
        run_globals = {"__file__": run_file}
        # execfile() was removed in Python 3; exec the compiled source instead.
        exec(compile(open(run_file).read(), run_file, 'exec'), run_globals)
        return run_globals["run_tests"](**kwargs)

    @Command('update-css',
             description='Update the web platform tests',
             category='testing',
             parser=updatecommandline.create_parser())
    def update_css(self, **kwargs):
        """Sync the CSS web-platform-tests via tests/wpt/update_css.py."""
        self.ensure_bootstrapped()
        self.ensure_wpt_virtualenv()
        run_file = path.abspath(path.join("tests", "wpt", "update_css.py"))
        run_globals = {"__file__": run_file}
        # execfile() was removed in Python 3; exec the compiled source instead.
        exec(compile(open(run_file).read(), run_file, 'exec'), run_globals)
        return run_globals["update_tests"](**kwargs)

    def ensure_wpt_virtualenv(self):
        """Create and activate the WPT virtualenv, installing the harness
        requirements on first use."""
        virtualenv_path = path.join("tests", "wpt", "_virtualenv")
        python = self.get_exec("python2", "python")

        if not os.path.exists(virtualenv_path):
            virtualenv = self.get_exec("virtualenv2", "virtualenv")
            subprocess.check_call([virtualenv, "-p", python, virtualenv_path])

        activate_path = path.join(virtualenv_path, "bin", "activate_this.py")

        # execfile() was removed in Python 3; exec the compiled source instead.
        exec(compile(open(activate_path).read(), activate_path, 'exec'),
             dict(__file__=activate_path))

        try:
            import wptrunner  # noqa
            from wptrunner.browsers import servo  # noqa
        except ImportError:
            subprocess.check_call([
                "pip", "install", "-r",
                path.join("tests", "wpt", "harness", "requirements.txt")
            ])
            subprocess.check_call([
                "pip", "install", "-r",
                path.join("tests", "wpt", "harness", "requirements_servo.txt")
            ])
        try:
            import blessings
        except ImportError:
            subprocess.check_call(["pip", "install", "blessings"])

        # This is an unfortunate hack. Because mozlog gets imported by wptcommandline
        # before the virtualenv is initialised it doesn't see the blessings module so we don't
        # get coloured output. Setting the blessings global explicitly fixes that.
        from mozlog.structured.formatters import machformatter
        import blessings  # noqa
        machformatter.blessings = blessings

    def get_exec(self, name, default=None):
        """Return the full path of executable `name`, or `default` when it
        cannot be found on PATH."""
        # Local renamed so it no longer shadows the module-level `path` alias.
        found = find_executable(name)
        return found if found else default

    def jquery_test_runner(self, cmd, release, dev):
        """Clone/refresh the servo branch of jQuery and invoke its harness
        with sub-command `cmd` against the chosen Servo binary."""
        self.ensure_bootstrapped()
        suite_dir = path.abspath(path.join("tests", "jquery"))
        checkout_dir = path.join(suite_dir, "jquery")
        harness = path.join(suite_dir, "run_jquery.py")

        # Clone the jQuery repository if it doesn't exist
        if not os.path.isdir(checkout_dir):
            subprocess.check_call([
                "git", "clone", "-b", "servo", "--depth", "1",
                "https://github.com/servo/jquery", checkout_dir
            ])

        # Run pull in case the jQuery repo was updated since last test run
        subprocess.check_call(["git", "-C", checkout_dir, "pull"])

        # Check that a release servo build exists
        bin_path = path.abspath(self.get_binary_path(release, dev))

        return subprocess.check_call([harness, cmd, bin_path, suite_dir])
def create_parser_update():
    """Return the argument parser used by the WPT update commands."""
    from update import updatecommandline
    parser = updatecommandline.create_parser()
    return parser
Example #9
0
class MachCommands(CommandBase):
    """Mach command provider for Servo's test suites (unit, ref, WPT, CSS)."""

    def __init__(self, context):
        CommandBase.__init__(self, context)
        # Seed the shared flag only once so repeated dispatches within the
        # same mach invocation do not rebuild the test binaries.
        if not hasattr(self.context, "built_tests"):
            self.context.built_tests = False

    def ensure_built_tests(self):
        """Build the test binaries once, exiting the process on failure."""
        if self.context.built_tests:
            return
        returncode = Registrar.dispatch('build-tests', context=self.context)
        if returncode:
            sys.exit(returncode)
        self.context.built_tests = True

    def find_test(self, prefix):
        """Return the path of the executable `<prefix>-*` test binary under
        target/debug, or None when no such executable exists."""
        target_contents = os.listdir(
            path.join(self.context.topdir, "components", "servo", "target",
                      "debug"))
        for filename in target_contents:
            if filename.startswith(prefix + "-"):
                filepath = path.join(self.context.topdir, "components",
                                     "servo", "target", "debug", filename)

                if path.isfile(filepath) and os.access(filepath, os.X_OK):
                    return filepath

    def run_test(self, prefix, args=None):
        """Run the test binary matching `prefix` with `args`; return its exit
        status, or None when the binary cannot be found."""
        # The default used to be a mutable `[]`, which Python shares across
        # calls; use None as the sentinel instead.
        if args is None:
            args = []
        t = self.find_test(prefix)
        if t:
            return subprocess.call([t] + args, env=self.build_env())

    def infer_test_by_dir(self, params):
        """Dispatch to the mach test command that owns the path `params[0]`,
        forwarding the remaining params; return 1 for unrecognised paths."""
        maybe_path = path.normpath(params[0])
        mach_command = path.join(self.context.topdir, "mach")
        args = None

        if not path.exists(maybe_path):
            print("%s is not a valid file or directory" % maybe_path)
            return 1

        test_dirs = [
            # path, mach test command, optional flag for path argument
            (path.join("tests", "wpt"), "test-wpt", None),
            (path.join("tests", "ref"), "test-ref", ["--name"]),
        ]

        for test_dir, test_name, path_flag in test_dirs:
            if not path_flag:
                path_flag = []
            if test_dir in maybe_path:
                args = ([mach_command, test_name] + path_flag + [maybe_path] +
                        params[1:])
                break
        else:
            # No test_dir matched the given path.
            print("%s is not a valid test file or directory" % maybe_path)
            return 1

        return subprocess.call(args, env=self.build_env())

    @Command('test', description='Run all Servo tests', category='testing')
    @CommandArgument('params',
                     default=None,
                     nargs="...",
                     help="Optionally select test based on "
                     "test file directory")
    def test(self, params):
        """Run every Servo test suite, or just the one owning `params[0]`."""
        if params:
            return self.infer_test_by_dir(params)

        test_start = time()
        for t in ["tidy", "ref", "wpt", "css", "unit"]:
            Registrar.dispatch("test-%s" % t, context=self.context)
        elapsed = time() - test_start

        print("Tests completed in %0.2fs" % elapsed)

    @Command('test-unit', description='Run unit tests', category='testing')
    @CommandArgument('test_name',
                     nargs=argparse.REMAINDER,
                     help="Only run tests that match this pattern")
    def test_unit(self, test_name=None, component=None, package=None):
        """Run the Rust unit tests, optionally filtered by `test_name`.

        `component` and `package` are accepted for interface compatibility
        but are not used here.
        """
        if test_name is None:
            test_name = []

        self.ensure_bootstrapped()

        # Mach treats a truthy return as failure.
        return 0 != subprocess.call(
            ["cargo", "test", "-p", "unit_tests"] + test_name,
            env=self.build_env(),
            cwd=self.servo_crate())

    @Command('test-ref',
             description='Run the reference tests',
             category='testing')
    @CommandArgument('--kind',
                     '-k',
                     default=None,
                     help="'cpu' or 'gpu' (default both)")
    @CommandArgument('--name',
                     default=None,
                     help="Only run tests that match this pattern. If the "
                     "path to the ref test directory is included, it "
                     "will automatically be trimmed out.")
    @CommandArgument(
        'servo_params',
        default=None,
        nargs=argparse.REMAINDER,
        help="Command-line arguments to be passed through to Servo")
    def test_ref(self, kind=None, name=None, servo_params=None):
        """Run the reference tests for each requested render kind; return 1
        if any run fails."""
        self.ensure_bootstrapped()
        self.ensure_built_tests()

        kinds = ["cpu", "gpu"] if kind is None else [kind]
        test_path = path.join(self.context.topdir, "tests", "ref")
        error = False

        test_start = time()
        for k in kinds:
            print("Running %s reftests..." % k)
            test_args = [k, test_path]
            if name is not None:
                maybe_path = path.normpath(name)
                ref_path = path.join("tests", "ref")

                # Check to see if we were passed something leading with the
                # path to the ref test directory, and trim it so that reftest
                # knows how to filter it.
                if ref_path in maybe_path:
                    test_args.append(path.relpath(maybe_path, ref_path))
                else:
                    test_args.append(name)
            if servo_params is not None:
                test_args += ["--"] + servo_params
            # run_test returns None when the reftest binary is missing,
            # which also counts as an error here (None != 0).
            ret = self.run_test("reftest", test_args)
            error = error or ret != 0
        elapsed = time() - test_start

        print("Reference tests completed in %0.2fs" % elapsed)

        if error:
            return 1

    @Command('test-content',
             description='Run the content tests',
             category='testing')
    def test_content(self):
        """Stub retained for compatibility; content tests moved into WPT."""
        print("Content tests have been replaced by web-platform-tests under "
              "tests/wpt/mozilla/.")
        return 0

    @Command('test-tidy',
             description='Run the source code tidiness check',
             category='testing')
    def test_tidy(self):
        """Run the source tidiness lint and return its status."""
        return tidy.scan()

    @Command('test-wpt-failure',
             description='Run the web platform tests',
             category='testing')
    def test_wpt_failure(self):
        """Sanity-check that the WPT harness reports failures: succeed
        (return falsy) only when the known-failing test makes run.sh exit
        non-zero."""
        self.ensure_bootstrapped()
        return not subprocess.call([
            "bash",
            path.join("tests", "wpt", "run.sh"), "--no-pause-after-test",
            "--include", "infrastructure/failing-test.html"
        ], env=self.build_env())

    @Command('test-wpt',
             description='Run the web platform tests',
             category='testing',
             parser=wptcommandline.create_parser())
    @CommandArgument('--release',
                     default=False,
                     action="store_true",
                     help="Run with a release build of servo")
    def test_wpt(self, **kwargs):
        """Run the web-platform-tests via tests/wpt/run_wpt.py."""
        self.ensure_bootstrapped()
        self.ensure_wpt_virtualenv()
        hosts_file_path = path.join('tests', 'wpt', 'hosts')

        # The runner script reads the hosts file location from the environment.
        os.environ["hosts_file_path"] = hosts_file_path

        run_file = path.abspath(path.join("tests", "wpt", "run_wpt.py"))
        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)
        return run_globals["run_tests"](**kwargs)

    @Command('update-wpt',
             description='Update the web platform tests',
             category='testing',
             parser=updatecommandline.create_parser())
    def update_wpt(self, **kwargs):
        """Update WPT expectations via tests/wpt/update.py."""
        self.ensure_bootstrapped()
        self.ensure_wpt_virtualenv()
        run_file = path.abspath(path.join("tests", "wpt", "update.py"))
        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)
        return run_globals["update_tests"](**kwargs)

    @Command('test-css',
             description='Run the web platform tests',
             category='testing',
             parser=wptcommandline.create_parser())
    @CommandArgument('--release',
                     default=False,
                     action="store_true",
                     help="Run with a release build of servo")
    def test_css(self, **kwargs):
        """Run the CSS test suite via tests/wpt/run_css.py."""
        self.ensure_bootstrapped()
        self.ensure_wpt_virtualenv()

        run_file = path.abspath(path.join("tests", "wpt", "run_css.py"))
        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)
        return run_globals["run_tests"](**kwargs)

    @Command('update-css',
             description='Update the web platform tests',
             category='testing',
             parser=updatecommandline.create_parser())
    def update_css(self, **kwargs):
        """Update CSS test expectations via tests/wpt/update_css.py."""
        self.ensure_bootstrapped()
        self.ensure_wpt_virtualenv()
        run_file = path.abspath(path.join("tests", "wpt", "update_css.py"))
        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)
        return run_globals["update_tests"](**kwargs)

    def ensure_wpt_virtualenv(self):
        """Create and activate the WPT virtualenv, installing the harness
        requirements on first use."""
        virtualenv_path = path.join("tests", "wpt", "_virtualenv")
        python = self.get_exec("python2", "python")

        if not os.path.exists(virtualenv_path):
            virtualenv = self.get_exec("virtualenv2", "virtualenv")
            subprocess.check_call([virtualenv, "-p", python, virtualenv_path])

        activate_path = path.join(virtualenv_path, "bin", "activate_this.py")

        execfile(activate_path, dict(__file__=activate_path))

        try:
            import wptrunner  # noqa
            from wptrunner.browsers import servo  # noqa
        except ImportError:
            subprocess.check_call([
                "pip", "install", "-r",
                path.join("tests", "wpt", "harness", "requirements.txt")
            ])
            subprocess.check_call([
                "pip", "install", "-r",
                path.join("tests", "wpt", "harness", "requirements_servo.txt")
            ])
        try:
            import blessings  # noqa
        except ImportError:
            subprocess.check_call(["pip", "install", "blessings"])

        # This is an unfortunate hack. Because mozlog gets imported by wptcommandline
        # before the virtualenv is initialised it doesn't see the blessings module so we don't
        # get coloured output. Setting the blessings global explicitly fixes that.
        from mozlog.structured.formatters import machformatter
        import blessings  # noqa
        machformatter.blessings = blessings

    def get_exec(self, name, default=None):
        """Return the absolute path of executable `name` from PATH, or
        `default` when it cannot be located."""
        path = find_executable(name)
        if not path:
            return default

        return path
Example #10
0
class MachCommands(CommandBase):
    DEFAULT_RENDER_MODE = "cpu"
    HELP_RENDER_MODE = "Value can be 'cpu', 'gpu' or 'both' (default " + DEFAULT_RENDER_MODE + ")"

    def __init__(self, context):
        """Initialise the provider and seed the shared built_tests flag."""
        CommandBase.__init__(self, context)
        # Keep an existing value (set by an earlier provider) and default
        # to False otherwise.
        self.context.built_tests = getattr(self.context, "built_tests", False)

    @Command('test',
             description='Run all Servo tests',
             category='testing')
    @CommandArgument('params', default=None, nargs="...",
                     help="Optionally select test based on "
                          "test file directory")
    @CommandArgument('--render-mode', '-rm', default=DEFAULT_RENDER_MODE,
                     help="The render mode to be used on all tests. " +
                          HELP_RENDER_MODE)
    @CommandArgument('--release', default=False, action="store_true",
                     help="Run with a release build of servo")
    @CommandArgument('--faster', default=False, action="store_true",
                     help="Only check changed files and skip the WPT lint in tidy")
    @CommandArgument('--no-progress', default=False, action="store_true",
                     help="Don't show progress for tidy")
    @CommandArgument('--self-test', default=False, action="store_true",
                     help="Run unit tests for tidy")
    def test(self, params, render_mode=DEFAULT_RENDER_MODE, release=False, faster=False, no_progress=False,
             self_test=False):
        """Run all Servo test suites, or only the suites selected by `params`.

        Each entry in `params` may be a suite name ("tidy", "wpt", ...) or a
        path inside one of the suite directories; paths are forwarded to the
        owning suite's include argument.  Returns 1 on an unrecognised
        argument, otherwise None.

        NOTE(review): render_mode is accepted but never forwarded to any
        suite below -- confirm whether that is intentional.
        """
        # Per-suite dispatch table: kwargs passed to the suite command, the
        # directories the suite owns, and the kwarg name used to pass
        # selected paths through.
        suites = OrderedDict([
            ("tidy", {"kwargs": {"faster": faster, "no_progress": no_progress, "self_test": self_test},
                      "include_arg": "include"}),
            ("wpt", {"kwargs": {"release": release},
                     "paths": [path.abspath(path.join("tests", "wpt", "web-platform-tests")),
                               path.abspath(path.join("tests", "wpt", "mozilla"))],
                     "include_arg": "include"}),
            ("css", {"kwargs": {"release": release},
                     "paths": [path.abspath(path.join("tests", "wpt", "css-tests"))],
                     "include_arg": "include"}),
            ("unit", {"kwargs": {},
                      "paths": [path.abspath(path.join("tests", "unit"))],
                      "include_arg": "test_name"}),
            ("compiletest", {"kwargs": {"release": release},
                             "paths": [path.abspath(path.join("tests", "compiletest"))],
                             "include_arg": "test_name"})
        ])

        # Invert the table: absolute directory prefix -> suite name.
        suites_by_prefix = {path: k for k, v in suites.iteritems() if "paths" in v for path in v["paths"]}

        selected_suites = OrderedDict()

        # No arguments selects every suite with no path filters.
        if params is None:
            params = suites.keys()

        for arg in params:
            found = False
            if arg in suites and arg not in selected_suites:
                # Argument names a suite directly.
                selected_suites[arg] = []
                found = True

            elif os.path.exists(path.abspath(arg)):
                # Argument is a path: attribute it to the owning suite.
                abs_path = path.abspath(arg)
                for prefix, suite in suites_by_prefix.iteritems():
                    if abs_path.startswith(prefix):
                        if suite not in selected_suites:
                            selected_suites[suite] = []
                        selected_suites[suite].append(arg)
                        found = True
                        break

            if not found:
                print("%s is not a valid test path or suite name" % arg)
                return 1

        test_start = time()
        for suite, tests in selected_suites.iteritems():
            props = suites[suite]
            kwargs = props.get("kwargs", {})
            if tests:
                kwargs[props["include_arg"]] = tests

            Registrar.dispatch("test-%s" % suite, context=self.context, **kwargs)

        elapsed = time() - test_start

        print("Tests completed in %0.2fs" % elapsed)

    @Command('test-unit',
             description='Run unit tests',
             category='testing')
    @CommandArgument('--package', '-p', default=None, help="Specific package to test")
    @CommandArgument('test_name', nargs=argparse.REMAINDER,
                     help="Only run tests that match this pattern or file path")
    def test_unit(self, test_name=None, package=None):
        """Run Servo's Rust unit tests with cargo.

        `test_name` entries may be suite directories ('tests/unit/<pkg>'),
        '<pkg>/<test>' pairs (with or without a '.rs' suffix), or bare test
        name patterns.  Returns cargo's exit status on failure, else None.
        """
        # Run list_properties.py first; the css-properties.json read below is
        # presumably its output -- confirm against the script.
        subprocess.check_output([
            sys.executable,
            path.join(self.context.topdir, "components", "style", "list_properties.py")
        ])

        this_file = os.path.dirname(__file__)
        servo_doc_path = os.path.abspath(os.path.join(this_file, '../', '../', 'target', 'doc', 'servo'))

        # Sanity-check the generated CSS property listing before testing.
        with open(os.path.join(servo_doc_path, 'css-properties.json'), 'r') as property_file:
            properties = json.loads(property_file.read())

        assert len(properties) >= 100
        assert "margin-top" in properties
        assert "margin" in properties

        if test_name is None:
            test_name = []

        self.ensure_bootstrapped()

        if package:
            packages = {package}
        else:
            packages = set()

        # Translate each test_name entry into a package and/or test pattern.
        test_patterns = []
        for test in test_name:
            # add package if 'tests/unit/<package>'
            match = re.search("tests/unit/(\\w+)/?$", test)
            if match:
                packages.add(match.group(1))
            # add package & test if '<package>/<test>', 'tests/unit/<package>/<test>.rs', or similar
            elif re.search("\\w/\\w", test):
                tokens = test.split("/")
                packages.add(tokens[-2])
                test_prefix = tokens[-1]
                if test_prefix.endswith(".rs"):
                    test_prefix = test_prefix[:-3]
                test_prefix += "::"
                test_patterns.append(test_prefix)
            # add test as-is otherwise
            else:
                test_patterns.append(test)

        # Default to every package under tests/unit.
        if not packages:
            packages = set(os.listdir(path.join(self.context.topdir, "tests", "unit")))

        args = ["cargo", "test"]
        for crate in packages:
            args += ["-p", "%s_tests" % crate]
        args += test_patterns

        env = self.build_env()
        env["RUST_BACKTRACE"] = "1"

        result = call(args, env=env, cwd=self.servo_crate())
        if result != 0:
            return result

    @Command('test-compiletest',
             description='Run compiletests',
             category='testing')
    @CommandArgument('--package', '-p', default=None, help="Specific package to test")
    @CommandArgument('test_name', nargs=argparse.REMAINDER,
                     help="Only run tests that match this pattern or file path")
    @CommandArgument('--release', default=False, action="store_true",
                     help="Run with a release build of servo")
    def test_compiletest(self, test_name=None, package=None, release=False):
        if test_name is None:
            test_name = []

        self.ensure_bootstrapped()

        if package:
            packages = {package}
        else:
            packages = set()

        test_patterns = []
        for test in test_name:
            # add package if 'tests/compiletest/<package>'
            match = re.search("tests/compiletest/(\\w+)/?$", test)
            if match:
                packages.add(match.group(1))
            # add package & test if '<package>/<test>', 'tests/compiletest/<package>/<test>.rs', or similar
            elif re.search("\\w/\\w", test):
                tokens = test.split("/")
                packages.add(tokens[-2])
                test_prefix = tokens[-1]
                if test_prefix.endswith(".rs"):
                    test_prefix = test_prefix[:-3]
                test_prefix += "::"
                test_patterns.append(test_prefix)
            # add test as-is otherwise
            else:
                test_patterns.append(test)

        if not packages:
            packages = set(os.listdir(path.join(self.context.topdir, "tests", "compiletest")))

        packages.remove("helper")

        args = ["cargo", "test"]
        for crate in packages:
            args += ["-p", "%s_compiletest" % crate]
        args += test_patterns

        env = self.build_env()
        if release:
            env["BUILD_MODE"] = "release"
            args += ["--release"]
        else:
            env["BUILD_MODE"] = "debug"

        result = call(args, env=env, cwd=self.servo_crate())
        if result != 0:
            return result

    @Command('test-content',
             description='Run the content tests',
             category='testing')
    def test_content(self):
        """Explain that the content suite was folded into WPT; always succeed."""
        notice = ("Content tests have been replaced by web-platform-tests under "
                  "tests/wpt/mozilla/.")
        print(notice)
        return 0

    @Command('test-tidy',
             description='Run the source code tidiness check',
             category='testing')
    @CommandArgument('--faster', default=False, action="store_true",
                     help="Only check changed files and skip the WPT lint in tidy, "
                          "if there are no changes in the WPT files")
    @CommandArgument('--no-progress', default=False, action="store_true",
                     help="Don't show progress for tidy")
    @CommandArgument('--self-test', default=False, action="store_true",
                     help="Run unit tests for tidy")
    def test_tidy(self, faster, no_progress, self_test):
        """Run tidy's own unit tests (--self-test) or the tidiness scan."""
        if self_test:
            return test_tidy.do_tests()
        return tidy.scan(faster, not no_progress)

    @Command('test-webidl',
             description='Run the WebIDL parser tests',
             category='testing')
    @CommandArgument('--quiet', '-q', default=False, action="store_true",
                     help="Don't print passing tests.")
    @CommandArgument('tests', default=None, nargs="...",
                     help="Specific tests to run, relative to the tests directory")
    def test_webidl(self, quiet, tests):
        """Run the WebIDL parser's own test suite through its runtests.py.

        Returns whatever the script's run_tests(tests, verbose) returns.
        """
        self.ensure_bootstrapped()

        test_file_dir = path.abspath(path.join(PROJECT_TOPLEVEL_PATH, "components", "script",
                                               "dom", "bindings", "codegen", "parser"))
        # For the `import WebIDL` in runtests.py
        sys.path.insert(0, test_file_dir)

        run_file = path.abspath(path.join(test_file_dir, "runtests.py"))
        run_globals = {"__file__": run_file}
        execfile(run_file, run_globals)

        verbose = not quiet
        return run_globals["run_tests"](tests, verbose)

    @Command('test-wpt-failure',
             description='Run the web platform tests',
             category='testing')
    def test_wpt_failure(self):
        """Check that the WPT harness detects failures: succeed (return
        falsy) only when the known-failing test makes run.sh exit non-zero."""
        self.ensure_bootstrapped()
        harness_cmd = [
            "bash",
            path.join("tests", "wpt", "run.sh"),
            "--no-pause-after-test",
            "--include",
            "infrastructure/failing-test.html",
        ]
        status = call(harness_cmd, env=self.build_env())
        # A non-zero harness status is the expected (passing) outcome here.
        return not status

    @Command('test-wpt',
             description='Run the web platform tests',
             category='testing',
             parser=create_parser_wpt)
    def test_wpt(self, **kwargs):
        """Run the web-platform-tests through the shared wptrunner helper."""
        self.ensure_bootstrapped()
        # The runner script locates the hosts file via this environment variable.
        os.environ["hosts_file_path"] = path.join(self.context.topdir, 'tests', 'wpt', 'hosts')
        wpt_entry_point = path.abspath(path.join(self.context.topdir, "tests", "wpt", "run_wpt.py"))
        return self.wptrunner(wpt_entry_point, **kwargs)

    # Helper for test_css and test_wpt:
    def wptrunner(self, run_file, **kwargs):
        """Execute a wptrunner entry-point script with the given options.

        The "chaos" option switches the run into rr chaos-mode recording
        that repeats until an unexpected result appears.
        """
        os.environ["RUST_BACKTRACE"] = "1"
        kwargs["debug"] = not kwargs["release"]
        # Always pop "chaos" -- the entry point does not accept it.
        chaos_mode = kwargs.pop("chaos")
        if chaos_mode:
            kwargs["debugger"] = "rr"
            kwargs["debugger_args"] = "record --chaos"
            kwargs["repeat_until_unexpected"] = True
            # TODO: Delete rr traces from green test runs?

        script_globals = {"__file__": run_file}
        execfile(run_file, script_globals)
        return script_globals["run_tests"](**kwargs)

    @Command('update-wpt',
             description='Update the web platform tests',
             category='testing',
             parser=updatecommandline.create_parser())
    @CommandArgument('--patch', action='store_true', default=False,
                     help='Create an mq patch or git commit containing the changes')
    def update_wpt(self, patch, **kwargs):
        """Run tests/wpt/update.py to refresh the WPT expectations."""
        self.ensure_bootstrapped()
        update_script = path.abspath(path.join("tests", "wpt", "update.py"))
        # The updater takes the inverse flag.
        kwargs["no_patch"] = not patch
        script_globals = {"__file__": update_script}
        execfile(update_script, script_globals)
        return script_globals["update_tests"](**kwargs)

    @Command('test-jquery',
             description='Run the jQuery test suite',
             category='testing')
    @CommandArgument('--release', '-r', action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev', '-d', action='store_true',
                     help='Run the dev build')
    def test_jquery(self, release, dev):
        """Run the jQuery suite against a Servo build via jquery_test_runner."""
        return self.jquery_test_runner("test", release, dev)

    @Command('test-dromaeo',
             description='Run the Dromaeo test suite',
             category='testing')
    @CommandArgument('tests', default=["recommended"], nargs="...",
                     help="Specific tests to run")
    @CommandArgument('--release', '-r', action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev', '-d', action='store_true',
                     help='Run the dev build')
    def test_dromaeo(self, tests, release, dev):
        """Run the Dromaeo benchmarks via dromaeo_test_runner."""
        return self.dromaeo_test_runner(tests, release, dev)

    @Command('update-jquery',
             description='Update the jQuery test suite expected results',
             category='testing')
    @CommandArgument('--release', '-r', action='store_true',
                     help='Run the release build')
    @CommandArgument('--dev', '-d', action='store_true',
                     help='Run the dev build')
    def update_jquery(self, release, dev):
        """Regenerate jQuery expected results via jquery_test_runner."""
        return self.jquery_test_runner("update", release, dev)

    @Command('test-css',
             description='Run the web platform tests',
             category='testing',
             parser=create_parser_wpt)
    def test_css(self, **kwargs):
        """Run the CSS test suite through the shared wptrunner helper."""
        self.ensure_bootstrapped()
        css_entry_point = path.abspath(path.join("tests", "wpt", "run_css.py"))
        return self.wptrunner(css_entry_point, **kwargs)

    @Command('update-css',
             description='Update the web platform tests',
             category='testing',
             parser=updatecommandline.create_parser())
    def update_css(self, **kwargs):
        """Run tests/wpt/update_css.py to refresh CSS test expectations."""
        self.ensure_bootstrapped()
        update_script = path.abspath(path.join("tests", "wpt", "update_css.py"))
        script_globals = {"__file__": update_script}
        execfile(update_script, script_globals)
        return script_globals["update_tests"](**kwargs)

    @Command('compare_dromaeo',
             description='compare outputs of two runs of ./mach test-dromaeo command',
             category='testing')
    @CommandArgument('params', default=None, nargs="...",
                     help=" filepaths of output files of two runs of dromaeo test ")
    def compare_dromaeo(self, params):
        """Print a colourised table comparing two saved Dromaeo runs.

        `params` holds the two output file paths (previous run, current run).
        Rows that got slower are printed in red, faster ones in green.
        Returns 1 when the expected Dromaeo output cannot be parsed.
        """
        prev_op_filename = params[0]
        cur_op_filename = params[1]
        result = {'Test': [], 'Prev_Time': [], 'Cur_Time': [], 'Difference(%)': []}
        with open(prev_op_filename, 'r') as prev_op, open(cur_op_filename, 'r') as cur_op:
            l1 = prev_op.readline()
            l2 = cur_op.readline()

            # Skip ahead until both files reach the '[dromaeo] Saving...'
            # marker: str.find() is 0 (falsy) only when the line starts with
            # it.  Guard against EOF -- readline() returns '' forever at EOF
            # and ''.find(...) == -1 is truthy, so without the l1/l2 checks
            # this loop spun forever on files missing the marker.
            while l1 and l2 and l1.find('[dromaeo] Saving...') and l2.find('[dromaeo] Saving...'):
                l1 = prev_op.readline()
                l2 = cur_op.readline()

            # Skip the three lines that follow the marker.
            reach = 3
            while (reach > 0):
                l1 = prev_op.readline()
                l2 = cur_op.readline()
                reach -= 1

            # Collect 'name | time | ...' rows from both files in lockstep,
            # stopping when either file runs out (a short second file used to
            # raise IndexError on the split below).
            while True:
                l1 = prev_op.readline()
                l2 = cur_op.readline()
                if not l1 or not l2:
                    break
                result['Test'].append(str(l1).split('|')[0].strip())
                result['Prev_Time'].append(float(str(l1).split('|')[1].strip()))
                result['Cur_Time'].append(float(str(l2).split('|')[1].strip()))
                a = float(str(l1).split('|')[1].strip())
                b = float(str(l2).split('|')[1].strip())
                result['Difference(%)'].append(((b - a) / a) * 100)

            # Nothing parsed: the files lacked the expected Dromaeo output
            # (the max() calls below would otherwise fail on empty lists).
            if not result['Test']:
                print("Failed to parse dromaeo output from %s and %s"
                      % (prev_op_filename, cur_op_filename))
                return 1

            width_col1 = max([len(x) for x in result['Test']])
            width_col2 = max([len(str(x)) for x in result['Prev_Time']])
            width_col3 = max([len(str(x)) for x in result['Cur_Time']])
            width_col4 = max([len(str(x)) for x in result['Difference(%)']])

            # Bold header row followed by a separator line.
            for p, q, r, s in zip(['Test'], ['First Run'], ['Second Run'], ['Difference(%)']):
                print ("\033[1m" + "{}|{}|{}|{}".format(p.ljust(width_col1), q.ljust(width_col2), r.ljust(width_col3),
                       s.ljust(width_col4)) + "\033[0m" + "\n" + "--------------------------------------------------"
                       + "-------------------------------------------------------------------------")

            # Red (\033[91m) rows regressed, green (\033[92m) rows improved.
            for a1, b1, c1, d1 in zip(result['Test'], result['Prev_Time'], result['Cur_Time'], result['Difference(%)']):
                if d1 > 0:
                    print ("\033[91m" + "{}|{}|{}|{}".format(a1.ljust(width_col1),
                           str(b1).ljust(width_col2), str(c1).ljust(width_col3), str(d1).ljust(width_col4)) + "\033[0m")
                elif d1 < 0:
                    print ("\033[92m" + "{}|{}|{}|{}".format(a1.ljust(width_col1),
                           str(b1).ljust(width_col2), str(c1).ljust(width_col3), str(d1).ljust(width_col4)) + "\033[0m")
                else:
                    print ("{}|{}|{}|{}".format(a1.ljust(width_col1), str(b1).ljust(width_col2),
                           str(c1).ljust(width_col3), str(d1).ljust(width_col4)))

    def jquery_test_runner(self, cmd, release, dev):
        """Run run_jquery.py with sub-command `cmd` ("test" or "update")
        against the Servo binary selected by the release/dev flags."""
        self.ensure_bootstrapped()
        suite_root = path.abspath(path.join("tests", "jquery"))
        checkout = path.join(suite_root, "jquery")
        runner = path.join(suite_root, "run_jquery.py")

        # Fetch the servo branch of the suite on first use...
        if not os.path.isdir(checkout):
            check_call(
                ["git", "clone", "-b", "servo", "--depth", "1", "https://github.com/servo/jquery", checkout])

        # ...and refresh it on every run.
        check_call(
            ["git", "-C", checkout, "pull"])

        # Resolve the Servo binary to hand to the runner.
        servo_binary = path.abspath(self.get_binary_path(release, dev))

        return call([runner, cmd, servo_binary, suite_root])

    def dromaeo_test_runner(self, tests, release, dev):
        """Clone/refresh and build the Dromaeo suite, then run run_dromaeo.py
        for the selected tests against the chosen Servo binary."""
        self.ensure_bootstrapped()
        suite_root = path.abspath(path.join("tests", "dromaeo"))
        checkout = path.join(suite_root, "dromaeo")
        runner = path.join(suite_root, "run_dromaeo.py")

        # Fetch the servo branch of Dromaeo on first use...
        if not os.path.isdir(checkout):
            check_call(
                ["git", "clone", "-b", "servo", "--depth", "1", "https://github.com/notriddle/dromaeo", checkout])

        # ...and refresh it on every run.
        check_call(
            ["git", "-C", checkout, "pull"])

        # Compile test suite
        check_call(
            ["make", "-C", checkout, "web"])

        # Resolve the Servo binary to hand to the runner.
        servo_binary = path.abspath(self.get_binary_path(release, dev))

        return check_call(
            [runner, "|".join(tests), servo_binary, suite_root])