def test_update_args():
    """Options passed explicitly to update_args must win over script defaults,
    while platform-specific options still override the generic defaults."""
    explicit = {"perfherder_metrics": [{"name": "yey"}]}
    script_info = ScriptInfo(EXAMPLE_TEST)
    merged = script_info.update_args(**explicit)

    # explicitly provided arguments are not overridden
    assert merged["perfherder_metrics"] == [{"name": "yey"}]

    # arguments in platform-specific options override default options
    assert merged["verbose"]
def run_perftest(self, **kwargs):
    """Mach entry point for ``./mach perftest``.

    Resolves the test selection (interactively via fzf when run locally
    with no tests given), infers the flavor from the script, then either
    pushes the run to try (``--push-to-try``) or runs the tests locally.

    :param kwargs: parsed command-line options; mutated in place
        (``tests``, ``flavor`` may be filled in, ``push_to_try`` is popped).
    """
    # original parser that brought us there
    original_parser = self.get_parser()
    from pathlib import Path

    # user selection with fuzzy UI
    from mozperftest.utils import ON_TRY
    from mozperftest.script import ScriptInfo, ScriptType, ParseError

    if not ON_TRY and kwargs.get("tests", []) == []:
        # No tests were given on the command line and we are running
        # locally: let the user pick interactively from all perftest
        # flavored tests known to the resolver.
        from moztest.resolve import TestResolver
        from mozperftest.fzf.fzf import select

        resolver = self._spawn(TestResolver)
        test_objects = list(
            resolver.resolve_tests(paths=None, flavor="perftest"))
        selected = select(test_objects)

        def full_path(selection):
            # fzf lines look like "<flavor> <script_name> <sep> <location>";
            # rebuild the absolute path under the source tree root
            __, script_name, __, location = selection.split(" ")
            return str(
                Path(
                    self.topsrcdir.rstrip(os.sep),
                    location.strip(os.sep),
                    script_name,
                ))

        kwargs["tests"] = [full_path(s) for s in selected]

        if kwargs["tests"] == []:
            print("\nNo selection. Bye!")
            return

    if len(kwargs["tests"]) > 1:
        print("\nSorry no support yet for multiple local perftest")
        return

    sel = "\n".join(kwargs["tests"])
    print("\nGood job! Best selection.\n%s" % sel)
    # if the script is xpcshell, we can force the flavor here
    # XXX on multi-selection, what happens if we have several flavors?
    try:
        script_info = ScriptInfo(kwargs["tests"][0])
    except ParseError as e:
        # a directory was selected; no single script to inspect
        if e.exception is IsADirectoryError:
            script_info = None
        else:
            raise
    else:
        if script_info.script_type == ScriptType.xpcshell:
            kwargs["flavor"] = script_info.script_type.name
        else:
            # we set the value only if not provided (so "mobile-browser"
            # can be picked)
            if "flavor" not in kwargs:
                kwargs["flavor"] = "desktop-browser"

    push_to_try = kwargs.pop("push_to_try", False)
    if push_to_try:
        sys.path.append(str(Path(self.topsrcdir, "tools", "tryselect")))
        # NOTE: this import deliberately rebinds the local name
        # ``push_to_try`` from a bool to the tryselect push function.
        from tryselect.push import push_to_try

        perftest_parameters = {}
        # NOTE(review): if ScriptInfo raised with IsADirectoryError above,
        # script_info is None here and update_args will fail — confirm
        # whether --push-to-try on a directory is supposed to be supported.
        args = script_info.update_args(
            **original_parser.get_user_args(kwargs))
        platform = args.pop("try_platform", "linux")
        if isinstance(platform, str):
            platform = [platform]

        # try task names are "<platform>-<script type>", e.g. "linux-browsertime"
        platform = [
            "%s-%s" % (plat, script_info.script_type.name)
            for plat in platform
        ]

        for plat in platform:
            if plat not in _TRY_PLATFORMS:
                # we can extend platform support here: linux, win, macOs, pixel2
                # by adding more jobs in taskcluster/ci/perftest/kind.yml
                # then picking up the right one here
                raise NotImplementedError(
                    "%r doesn't exist or is not yet supported" % plat)

        def relative(path):
            # make an absolute path relative to the source tree root so
            # the try task can resolve it in its own checkout
            if path.startswith(self.topsrcdir):
                return path[len(self.topsrcdir):].lstrip(os.sep)
            return path

        for name, value in args.items():
            # ignore values that are set to default
            if original_parser.get_default(name) == value:
                continue
            if name == "tests":
                value = [relative(path) for path in value]
            perftest_parameters[name] = value

        parameters = {
            "try_task_config": {
                "tasks": [_TRY_PLATFORMS[plat] for plat in platform],
                "perftest-options": perftest_parameters,
            },
            "try_mode": "try_task_config",
        }

        task_config = {"parameters": parameters, "version": 2}
        if args["verbose"]:
            print("Pushing run to try...")
            print(json.dumps(task_config, indent=4, sort_keys=True))

        push_to_try("perftest", "perftest", try_task_config=task_config)
        return

    # local run: hand everything over to the perftest runner
    from mozperftest.runner import run_tests

    run_tests(self, kwargs, original_parser.get_user_args(kwargs))

    print("\nFirefox. Fast For Good.\n")
def test_update_args_metrics_json_failure():
    """A perfherder_metrics entry of the wrong type must raise
    BadOptionTypeError instead of being silently accepted."""
    script_info = ScriptInfo(EXAMPLE_TEST)
    bad_args = {"perfherder_metrics": ["yey"]}
    with pytest.raises(BadOptionTypeError):
        script_info.update_args(**bad_args)
def run_tests(mach_cmd, kwargs, client_args):
    """This tests runner can be used directly via main or via Mach.

    When the --on-try option is used, the test runner looks at the
    `PERFTEST_OPTIONS` environment variable that contains all options passed by
    the user via a ./mach perftest --push-to-try call.

    :param mach_cmd: the mach command object providing the virtualenv and
        log manager.
    :param kwargs: runner options; mutated in place (``on_try``, ``hooks``
        and ``virtualenv`` are popped/added during setup).
    :param client_args: the user-provided command-line arguments, merged
        per-script through ``ScriptInfo.update_args``.
    """
    _setup_path()
    on_try = kwargs.pop("on_try", False)

    # trying to get the arguments from the task params
    if on_try:
        try_options = json.loads(os.environ["PERFTEST_OPTIONS"])
        print("Loading options from $PERFTEST_OPTIONS")
        print(json.dumps(try_options, indent=4, sort_keys=True))
        kwargs.update(try_options)

    from mozperftest.utils import build_test_list
    from mozperftest import MachEnvironment, Metadata
    from mozperftest.hooks import Hooks
    from mozperftest.script import ScriptInfo

    hooks_file = kwargs.pop("hooks", None)
    hooks = Hooks(mach_cmd, hooks_file)
    verbose = kwargs.get("verbose", False)
    log_level = logging.DEBUG if verbose else logging.INFO

    # If we run through mach, we just want to set the level
    # of the existing terminal handler.
    # Otherwise, we're adding it.
    if mach_cmd.log_manager.terminal_handler is not None:
        mach_cmd.log_manager.terminal_handler.level = log_level
    else:
        mach_cmd.log_manager.add_terminal_logging(level=log_level)
        mach_cmd.log_manager.enable_all_structured_loggers()
        mach_cmd.log_manager.enable_unstructured()

    try:
        # Only pass the virtualenv to the before_iterations hook
        # so that users can install test-specific packages if needed.
        mach_cmd.activate_virtualenv()
        kwargs["virtualenv"] = mach_cmd.virtualenv_manager
        hooks.run("before_iterations", kwargs)
        del kwargs["virtualenv"]

        # build_test_list may expand a directory/manifest into several
        # tests and hand back a temp dir to clean up afterwards
        tests, tmp_dir = build_test_list(kwargs["tests"])
        for test in tests:
            script = ScriptInfo(test)

            # update the arguments with options found in the script, if any
            args = script.update_args(**client_args)
            # XXX this should be the default pool for update_args
            for key, value in kwargs.items():
                if key not in args:
                    args[key] = value

            # update the hooks, or use a copy of the general one
            script_hooks = Hooks(mach_cmd, args.pop("hooks", hooks_file))

            flavor = args["flavor"]
            if flavor == "doc":
                # "doc" flavor only prints the script's documentation
                print(script)
                continue

            for iteration in range(args.get("test_iterations", 1)):
                try:
                    env = MachEnvironment(mach_cmd, hooks=script_hooks, **args)
                    metadata = Metadata(mach_cmd, env, flavor, script)
                    script_hooks.run("before_runs", env)
                    try:
                        with env.frozen() as e:
                            e.run(metadata)
                    finally:
                        # after_runs must fire even when the run raised
                        script_hooks.run("after_runs", env)
                finally:
                    # NOTE(review): this removes tmp_dir after the first
                    # iteration/test — confirm later iterations don't need it
                    if tmp_dir is not None:
                        shutil.rmtree(tmp_dir)
    finally:
        hooks.cleanup()