Example #1
def test_update_args():
    args = {"perfherder_metrics": [{"name": "yey"}]}
    info = ScriptInfo(EXAMPLE_TEST)
    new_args = info.update_args(**args)

    # arguments should not be overridden
    assert new_args["perfherder_metrics"] == [{"name": "yey"}]

    # arguments in platform-specific options should
    # override default options
    assert new_args["verbose"]
Example #2
    def _display(task):
        from mozperftest.script import ScriptInfo

        path = Path(task["path"])
        script_info = ScriptInfo(str(path))
        flavor = script_info.script_type.name
        if flavor == "browsertime":
            flavor = "bt"
        tags = script_info.get("tags", [])

        location = str(path.parent).replace(str(SRC_ROOT), "").strip("/")
        if len(tags) > 0:
            return f"[{flavor}][{','.join(tags)}] {path.name} in {location}"
        return f"[{flavor}] {path.name} in {location}"
Example #3
def test_scriptinfo_bt():
    info = ScriptInfo(EXAMPLE_TEST)
    assert info["author"] == "N/A"
    display = str(info)
    assert "The description of the example test." in display
    assert info.script_type == ScriptType.browsertime
    check_options(info)
Example #4
    def get_test_list(self):
        """
        Returns a dictionary containing the tests that are in perftest.ini manifest.

        :return dict: A dictionary with the following structure: {
                "suite_name": {
                    'perftest_test1',
                    'perftest_test2',
                },
            }
        """
        for path in pathlib.Path(self.workspace_dir).rglob("perftest.ini"):
            suite_name = re.sub(self.workspace_dir, "", os.path.dirname(path))

            # Get the tests from perftest.ini
            test_manifest = TestManifest([str(path)], strict=False)
            test_list = test_manifest.active_tests(exists=False,
                                                   disabled=False)
            for test in test_list:
                si = ScriptInfo(test["path"])
                self.script_infos[si["name"]] = si
                self._test_list.setdefault(suite_name,
                                           {}).update({si["name"]: ""})

        return self._test_list
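
For a hypothetical workspace with one suite, the returned structure would look like this (names are illustrative; in this variant each test name maps to an empty string):

    # self._test_list after the scan:
    # {
    #     "testing/performance": {
    #         "perftest_example": "",
    #     },
    # }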
Example #5
    def get_test_list(self):
        """
        Returns a dictionary containing the tests that are in perftest.ini manifest.

        :return dict: A dictionary with the following structure: {
                "suite_name": {
                    'perftest_test1',
                    'perftest_test2',
                },
            }
        """
        for path in pathlib.Path(self.workspace_dir).rglob("perftest.ini"):
            suite_name = re.sub(self.workspace_dir, "", os.path.dirname(path))

            # If the workspace dir doesn't end with a forward-slash,
            # the substitution above won't work completely
            if suite_name.startswith("/") or suite_name.startswith("\\"):
                suite_name = suite_name[1:]

            # We have to add new paths to the logger as we search
            # because mozperftest tests exist in multiple places in-tree
            PerfDocLogger.PATHS.append(suite_name)

            # Get the tests from perftest.ini
            test_manifest = TestManifest([str(path)], strict=False)
            test_list = test_manifest.active_tests(exists=False,
                                                   disabled=False)
            for test in test_list:
                si = ScriptInfo(test["path"])
                self.script_infos[si["name"]] = si
                self._test_list.setdefault(suite_name,
                                           {}).update({si["name"]: str(path)})

        return self._test_list
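
This variant differs from Example #4 in two ways: suite names are stripped of a leading path separator, and each test name maps to the manifest that declared it. A hypothetical result:

    # {
    #     "testing/performance": {
    #         "perftest_example": "/workspace/testing/performance/perftest.ini",
    #     },
    # }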
Example #6
def get_running_env(**kwargs):
    from mozbuild.base import MozbuildObject

    config = MozbuildObject.from_environment()
    mach_cmd = MagicMock()

    def get_binary_path(*args):
        return ""

    mach_cmd.get_binary_path = get_binary_path
    mach_cmd.topsrcdir = config.topsrcdir
    mach_cmd.topobjdir = config.topobjdir
    mach_cmd.distdir = config.distdir
    mach_cmd._mach_context = MagicMock()
    mach_cmd._mach_context.state_dir = tempfile.mkdtemp()
    mach_cmd.run_process.return_value = 0

    mach_args = {
        "flavor": "desktop-browser",
        "test_objects": None,
        "resolve_tests": True,
        "browsertime-clobber": False,
        "browsertime-install-url": None,
    }
    mach_args.update(kwargs)
    hooks = Hooks(mach_cmd, mach_args.pop("hooks", None))
    tests = mach_args.get("tests", [])
    if len(tests) > 0:
        script = ScriptInfo(tests[0])
    else:
        script = None
    env = MachEnvironment(mach_cmd, hooks=hooks, **mach_args)
    metadata = Metadata(mach_cmd, env, "desktop-browser", script)
    return mach_cmd, metadata, env
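
A minimal sketch of how this fixture is typically consumed in a test; EXAMPLE_TEST is a hypothetical path to a perftest script:

    # Extra kwargs are folded into mach_args and handed to MachEnvironment.
    mach_cmd, metadata, env = get_running_env(tests=[EXAMPLE_TEST])
    # mach_cmd is a MagicMock wired with the real topsrcdir/topobjdir,
    # env is a MachEnvironment, and metadata wraps the parsed ScriptInfo.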
Example #7
def test_scriptinfo_xpcshell(script):
    info = ScriptInfo(script)
    assert info["author"] == "N/A"

    display = str(info)
    assert "The description of the example test." in display
    assert info.script_type == ScriptType.xpcshell
    check_options(info)
Example #8
def get_running_env(**kwargs):
    from mozbuild.base import MozbuildObject

    config = MozbuildObject.from_environment()
    mach_cmd = MagicMock()

    def get_binary_path(*args):
        return ""

    def run_pip(args):
        pip = Path(sys.executable).parent / "pip"
        subprocess.check_call(
            [str(pip)] + args,
            stderr=subprocess.STDOUT,
            cwd=config.topsrcdir,
            universal_newlines=True,
        )

    mach_cmd.get_binary_path = get_binary_path
    mach_cmd.topsrcdir = config.topsrcdir
    mach_cmd.topobjdir = config.topobjdir
    mach_cmd.distdir = config.distdir
    mach_cmd._mach_context = MagicMock()
    mach_cmd._mach_context.state_dir = tempfile.mkdtemp()
    mach_cmd.run_process.return_value = 0
    mach_cmd.virtualenv_manager = MagicMock()
    mach_cmd.virtualenv_manager.python_path = sys.executable
    mach_cmd.virtualenv_manager.bin_path = Path(sys.executable).parent
    mach_cmd.virtualenv_manager._run_pip = run_pip

    mach_args = {
        "flavor": "desktop-browser",
        "test_objects": None,
        "resolve_tests": True,
        "browsertime-clobber": False,
        "browsertime-install-url": None,
    }
    mach_args.update(kwargs)
    hooks = Hooks(mach_cmd, mach_args.pop("hooks", None))
    tests = mach_args.get("tests", [])
    if len(tests) > 0:
        script = ScriptInfo(tests[0])
    else:
        script = None
    env = MachEnvironment(mach_cmd, hooks=hooks, **mach_args)
    metadata = Metadata(mach_cmd, env, "desktop-browser", script)
    return mach_cmd, metadata, env
Example #9
    def run_perftest(self, **kwargs):
        # original parser that brought us there
        original_parser = self.get_parser()

        from pathlib import Path

        # user selection with fuzzy UI
        from mozperftest.utils import ON_TRY
        from mozperftest.script import ScriptInfo, ScriptType, ParseError

        if not ON_TRY and kwargs.get("tests", []) == []:
            from moztest.resolve import TestResolver
            from mozperftest.fzf.fzf import select

            resolver = self._spawn(TestResolver)
            test_objects = list(
                resolver.resolve_tests(paths=None, flavor="perftest"))
            selected = select(test_objects)

            def full_path(selection):
                __, script_name, __, location = selection.split(" ")
                return str(
                    Path(
                        self.topsrcdir.rstrip(os.sep),
                        location.strip(os.sep),
                        script_name,
                    ))

            kwargs["tests"] = [full_path(s) for s in selected]

            if kwargs["tests"] == []:
                print("\nNo selection. Bye!")
                return

        if len(kwargs["tests"]) > 1:
            print("\nSorry no support yet for multiple local perftest")
            return

        sel = "\n".join(kwargs["tests"])
        print("\nGood job! Best selection.\n%s" % sel)
        # if the script is xpcshell, we can force the flavor here
        # XXX on multi-selection, what happens if we have several flavors?
        try:
            script_info = ScriptInfo(kwargs["tests"][0])
        except ParseError as e:
            if e.exception is IsADirectoryError:
                script_info = None
            else:
                raise
        else:
            if script_info.script_type == ScriptType.xpcshell:
                kwargs["flavor"] = script_info.script_type.name
            else:
                # we set the value only if not provided (so "mobile-browser"
                # can be picked)
                if "flavor" not in kwargs:
                    kwargs["flavor"] = "desktop-browser"

        push_to_try = kwargs.pop("push_to_try", False)
        if push_to_try:
            sys.path.append(str(Path(self.topsrcdir, "tools", "tryselect")))

            from tryselect.push import push_to_try

            perftest_parameters = {}
            args = script_info.update_args(
                **original_parser.get_user_args(kwargs))
            platform = args.pop("try_platform", "linux")
            if isinstance(platform, str):
                platform = [platform]

            platform = [
                "%s-%s" % (plat, script_info.script_type.name)
                for plat in platform
            ]

            for plat in platform:
                if plat not in _TRY_PLATFORMS:
                    # we can extend platform support here: linux, win, macOS, pixel2
                    # by adding more jobs in taskcluster/ci/perftest/kind.yml
                    # then picking up the right one here
                    raise NotImplementedError(
                        "%r doesn't exist or is not yet supported" % plat)

            def relative(path):
                if path.startswith(self.topsrcdir):
                    return path[len(self.topsrcdir):].lstrip(os.sep)
                return path

            for name, value in args.items():
                # ignore values that are set to default
                if original_parser.get_default(name) == value:
                    continue
                if name == "tests":
                    value = [relative(path) for path in value]
                perftest_parameters[name] = value

            parameters = {
                "try_task_config": {
                    "tasks": [_TRY_PLATFORMS[plat] for plat in platform],
                    "perftest-options": perftest_parameters,
                },
                "try_mode": "try_task_config",
            }

            task_config = {"parameters": parameters, "version": 2}
            if args["verbose"]:
                print("Pushing run to try...")
                print(json.dumps(task_config, indent=4, sort_keys=True))

            push_to_try("perftest", "perftest", try_task_config=task_config)
            return

        from mozperftest.runner import run_tests

        run_tests(self, kwargs, original_parser.get_user_args(kwargs))

        print("\nFirefox. Fast For Good.\n")
Example #10
def test_update_args_metrics_json_failure():
    args = {"perfherder_metrics": ["yey"]}
    info = ScriptInfo(EXAMPLE_TEST)

    with pytest.raises(BadOptionTypeError):
        info.update_args(**args)
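
Together with Example #1, this pins down the expected type of `perfherder_metrics`: a list of dicts, not a list of bare strings. A minimal sketch:

    info = ScriptInfo(EXAMPLE_TEST)
    info.update_args(perfherder_metrics=[{"name": "yey"}])  # accepted
    # info.update_args(perfherder_metrics=["yey"])  # raises BadOptionTypeError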
Example #11
def test_scriptinfo_failure():
    bad_example = HERE / "data" / "failing-samples" / "perftest_doc_failure_example.js"
    with pytest.raises(MissingFieldError):
        ScriptInfo(bad_example)
Example #12
def run_tests(mach_cmd, kwargs, client_args):
    """This tests runner can be used directly via main or via Mach.

    When the --on-try option is used, the test runner looks at the
    `PERFTEST_OPTIONS` environment variable that contains all options passed by
    the user via a ./mach perftest --push-to-try call.
    """
    _setup_path()
    on_try = kwargs.pop("on_try", False)

    # trying to get the arguments from the task params
    if on_try:
        try_options = json.loads(os.environ["PERFTEST_OPTIONS"])
        print("Loading options from $PERFTEST_OPTIONS")
        print(json.dumps(try_options, indent=4, sort_keys=True))
        kwargs.update(try_options)

    from mozperftest.utils import build_test_list
    from mozperftest import MachEnvironment, Metadata
    from mozperftest.hooks import Hooks
    from mozperftest.script import ScriptInfo

    hooks_file = kwargs.pop("hooks", None)
    hooks = Hooks(mach_cmd, hooks_file)
    verbose = kwargs.get("verbose", False)
    log_level = logging.DEBUG if verbose else logging.INFO

    # If we run through mach, we just want to set the level
    # of the existing terminal handler. Otherwise, we add one.
    if mach_cmd.log_manager.terminal_handler is not None:
        mach_cmd.log_manager.terminal_handler.level = log_level
    else:
        mach_cmd.log_manager.add_terminal_logging(level=log_level)
        mach_cmd.log_manager.enable_all_structured_loggers()
        mach_cmd.log_manager.enable_unstructured()

    try:
        # Only pass the virtualenv to the before_iterations hook
        # so that users can install test-specific packages if needed.
        mach_cmd.activate_virtualenv()
        kwargs["virtualenv"] = mach_cmd.virtualenv_manager
        hooks.run("before_iterations", kwargs)
        del kwargs["virtualenv"]

        tests, tmp_dir = build_test_list(kwargs["tests"])

        for test in tests:
            script = ScriptInfo(test)

            # update the arguments with options found in the script, if any
            args = script.update_args(**client_args)
            # XXX this should be the default pool for update_args
            for key, value in kwargs.items():
                if key not in args:
                    args[key] = value

            # update the hooks, or use a copy of the general one
            script_hooks = Hooks(mach_cmd, args.pop("hooks", hooks_file))

            flavor = args["flavor"]
            if flavor == "doc":
                print(script)
                continue

            for iteration in range(args.get("test_iterations", 1)):
                try:
                    env = MachEnvironment(mach_cmd, hooks=script_hooks, **args)
                    metadata = Metadata(mach_cmd, env, flavor, script)
                    script_hooks.run("before_runs", env)
                    try:
                        with env.frozen() as e:
                            e.run(metadata)
                    finally:
                        script_hooks.run("after_runs", env)
                finally:
                    if tmp_dir is not None:
                        shutil.rmtree(tmp_dir)
    finally:
        hooks.cleanup()
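
As the docstring notes, on try the options arrive through the environment. A hypothetical invocation (the JSON payload is illustrative):

    # The options serialized by `./mach perftest --push-to-try` are read back:
    #   export PERFTEST_OPTIONS='{"tests": ["perftest_example.js"], "verbose": true}'
    # run_tests(mach_cmd, {"on_try": True, "tests": []}, client_args={})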