Example #1
import shutil

from mozperftest.utils import build_test_list


def test_build_test_list():
    # EXAMPLE_TESTS_DIR: a constant from the perftest test-support helpers
    # pointing at a directory of bundled example tests.
    tests = [EXAMPLE_TESTS_DIR, "https://some/location/perftest_one.js"]
    try:
        files, tmp_dir = build_test_list(tests)
        assert len(files) == 2
    finally:
        shutil.rmtree(tmp_dir)
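build_test_list accepts both local paths and remote URLs and returns the resolved test files together with a temporary directory (presumably where remote tests are fetched), which is why the test removes tmp_dir afterwards. If build_test_list raised before returning, tmp_dir would be unbound in the finally block, so a slightly more defensive wrapper can pre-set it to None and guard the cleanup, the same None check the runner in the next example uses. A minimal sketch under that assumption; run_with_test_list is a hypothetical helper, not part of mozperftest:

import shutil

from mozperftest.utils import build_test_list


def run_with_test_list(tests, run):
    """Build the test list, hand it to ``run``, then always clean up."""
    tmp_dir = None  # stays None if build_test_list raises before returning
    try:
        files, tmp_dir = build_test_list(tests)
        run(files)
    finally:
        # Remote tests land in a temporary directory; remove it even when
        # building the list or running the tests fails part-way through.
        if tmp_dir is not None:
            shutil.rmtree(tmp_dir)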
Example #2
def run_tests(mach_cmd, **kwargs):
    """This tests runner can be used directly via main or via Mach.

    When the --on-try option is used, the test runner looks at the
    `PERFTEST_OPTIONS` environment variable that contains all options passed by
    the user via a ./mach perftest --push-to-try call.
    """
    _setup_path()
    on_try = kwargs.pop("on_try", False)

    # trying to get the arguments from the task params
    if on_try:
        try_options = json.loads(os.environ['PERFTEST_OPTIONS'])
        kwargs.update(try_options)

    from mozperftest.utils import build_test_list, install_package
    from mozperftest import MachEnvironment, Metadata

    flavor = kwargs["flavor"]
    kwargs["tests"], tmp_dir = build_test_list(
        kwargs["tests"], randomized=flavor != "doc"
    )

    try:
        if flavor == "doc":
            location = os.path.join(
                mach_cmd.topsrcdir, "third_party", "python", "esprima"
            )
            install_package(mach_cmd.virtualenv_manager, location)

            from mozperftest.scriptinfo import ScriptInfo

            for test in kwargs["tests"]:
                print(ScriptInfo(test))
            return

        env = MachEnvironment(mach_cmd, **kwargs)
        try:
            metadata = Metadata(mach_cmd, env, flavor)
            env.run_hook("before_runs")
            try:
                with env.frozen() as e:
                    e.run(metadata)
            finally:
                env.run_hook("after_runs")
        finally:
            env.cleanup()
    finally:
        if tmp_dir is not None:
            shutil.rmtree(tmp_dir)
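The --on-try branch above assumes PERFTEST_OPTIONS contains a JSON-encoded object of perftest options, which is merged into kwargs before the test list is built. A minimal sketch of how that contract might be exercised; the option names and values are illustrative, not the real task configuration:

import json
import os

# Illustrative option names only; the real set is whatever ./mach perftest accepts.
try_options = {"flavor": "desktop-browser", "tests": ["perftest_example.js"], "verbose": True}
os.environ["PERFTEST_OPTIONS"] = json.dumps(try_options)

# run_tests(mach_cmd, on_try=True, ...) will now json.loads() these options
# and update its kwargs with them before building the test list.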
Example #3
def run_tests(mach_cmd, **kwargs):
    """This tests runner can be used directly via main or via Mach.

    When the --on-try option is used, the test runner looks for the
    `parameters.yml` artifact that contains all options passed by
    the user via a ./mach perftest --push-to-try call.
    """
    _setup_path()
    on_try = kwargs.pop("on_try", False)

    # trying to get the arguments from the task params
    if on_try:
        params = _get_params()
        kwargs.update(params["try_options"]["perftest"])

    from mozperftest.utils import build_test_list, install_package
    from mozperftest import MachEnvironment, Metadata

    flavor = kwargs["flavor"]
    kwargs["tests"] = build_test_list(kwargs["tests"],
                                      randomized=flavor != "doc")

    if flavor == "doc":
        location = os.path.join(mach_cmd.topsrcdir, "third_party", "python",
                                "esprima")
        install_package(mach_cmd.virtualenv_manager, location)

        from mozperftest.scriptinfo import ScriptInfo

        for test in kwargs["tests"]:
            print(ScriptInfo(test))
        return

    env = MachEnvironment(mach_cmd, **kwargs)
    metadata = Metadata(mach_cmd, env, flavor)
    env.run_hook("before_runs")
    try:
        with env.frozen() as e:
            e.run(metadata)
    finally:
        env.run_hook("after_runs")
Example #4
def run_tests(mach_cmd, **kwargs):
    """This tests runner can be used directly via main or via Mach.

    When the --on-try option is used, the test runner looks at the
    `PERFTEST_OPTIONS` environment variable that contains all options passed by
    the user via a ./mach perftest --push-to-try call.
    """
    _setup_path()
    on_try = kwargs.pop("on_try", False)

    # trying to get the arguments from the task params
    if on_try:
        try_options = json.loads(os.environ["PERFTEST_OPTIONS"])
        kwargs.update(try_options)

    from mozperftest.utils import build_test_list
    from mozperftest import MachEnvironment, Metadata
    from mozperftest.hooks import Hooks

    hooks = Hooks(mach_cmd, kwargs.pop("hooks", None))
    verbose = kwargs.get("verbose", False)
    log_level = logging.DEBUG if verbose else logging.INFO

    # If we run through mach, we just want to set the level
    # of the existing terminal handler.
    # Otherwise, we're adding it.
    if mach_cmd.log_manager.terminal_handler is not None:
        mach_cmd.log_manager.terminal_handler.level = log_level
    else:
        mach_cmd.log_manager.add_terminal_logging(level=log_level)
        mach_cmd.log_manager.enable_all_structured_loggers()
        mach_cmd.log_manager.enable_unstructured()

    try:
        hooks.run("before_iterations", kwargs)

        for iteration in range(kwargs.get("test_iterations", 1)):
            flavor = kwargs["flavor"]
            kwargs["tests"], tmp_dir = build_test_list(
                kwargs["tests"], randomized=flavor != "doc")
            try:
                # XXX this doc is specific to browsertime scripts
                # maybe we want to move it
                if flavor == "doc":
                    from mozperftest.test.browsertime.script import ScriptInfo

                    for test in kwargs["tests"]:
                        print(ScriptInfo(test))
                    return

                env = MachEnvironment(mach_cmd, hooks=hooks, **kwargs)
                metadata = Metadata(mach_cmd, env, flavor)
                hooks.run("before_runs", env)
                try:
                    with env.frozen() as e:
                        e.run(metadata)
                finally:
                    hooks.run("after_runs", env)
            finally:
                if tmp_dir is not None:
                    shutil.rmtree(tmp_dir)
    finally:
        hooks.cleanup()
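This version introduces a Hooks object that loads an optional hooks file and runs named hook functions at fixed points. Assuming hooks.run(name, arg) dispatches to a same-named function in that file, a minimal sketch of such a hooks module; the function names and arguments follow from the run() calls above, the bodies are purely illustrative:

# perftest_hooks.py -- illustrative hook bodies; only the names and the
# arguments are taken from the hooks.run() calls in the runner above.


def before_iterations(kwargs):
    # Runs once before the iteration loop; may adjust the parsed options.
    kwargs.setdefault("test_iterations", 1)


def before_runs(env):
    # Runs with the MachEnvironment before each test run.
    print("perftest run starting")


def after_runs(env):
    # Runs after each test run, even when the run fails.
    print("perftest run finished")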
Example #5
def run_tests(mach_cmd, kwargs, client_args):
    """This tests runner can be used directly via main or via Mach.

    When the --on-try option is used, the test runner looks at the
    `PERFTEST_OPTIONS` environment variable that contains all options passed by
    the user via a ./mach perftest --push-to-try call.
    """
    _setup_path()
    on_try = kwargs.pop("on_try", False)

    # trying to get the arguments from the task params
    if on_try:
        try_options = json.loads(os.environ["PERFTEST_OPTIONS"])
        print("Loading options from $PERFTEST_OPTIONS")
        print(json.dumps(try_options, indent=4, sort_keys=True))
        kwargs.update(try_options)

    from mozperftest.utils import build_test_list
    from mozperftest import MachEnvironment, Metadata
    from mozperftest.hooks import Hooks
    from mozperftest.script import ScriptInfo

    hooks_file = kwargs.pop("hooks", None)
    hooks = Hooks(mach_cmd, hooks_file)
    verbose = kwargs.get("verbose", False)
    log_level = logging.DEBUG if verbose else logging.INFO

    # If we run through mach, we just want to set the level
    # of the existing terminal handler.
    # Otherwise, we're adding it.
    if mach_cmd.log_manager.terminal_handler is not None:
        mach_cmd.log_manager.terminal_handler.level = log_level
    else:
        mach_cmd.log_manager.add_terminal_logging(level=log_level)
        mach_cmd.log_manager.enable_all_structured_loggers()
        mach_cmd.log_manager.enable_unstructured()

    try:
        # Only pass the virtualenv to the before_iterations hook
        # so that users can install test-specific packages if needed.
        mach_cmd.activate_virtualenv()
        kwargs["virtualenv"] = mach_cmd.virtualenv_manager
        hooks.run("before_iterations", kwargs)
        del kwargs["virtualenv"]

        tests, tmp_dir = build_test_list(kwargs["tests"])

        for test in tests:
            script = ScriptInfo(test)

            # update the arguments with options found in the script, if any
            args = script.update_args(**client_args)
            # XXX this should be the default pool for update_args
            for key, value in kwargs.items():
                if key not in args:
                    args[key] = value

            # update the hooks, or use a copy of the general one
            script_hooks = Hooks(mach_cmd, args.pop("hooks", hooks_file))

            flavor = args["flavor"]
            if flavor == "doc":
                print(script)
                continue

            for iteration in range(args.get("test_iterations", 1)):
                try:
                    env = MachEnvironment(mach_cmd, hooks=script_hooks, **args)
                    metadata = Metadata(mach_cmd, env, flavor, script)
                    script_hooks.run("before_runs", env)
                    try:
                        with env.frozen() as e:
                            e.run(metadata)
                    finally:
                        script_hooks.run("after_runs", env)
                finally:
                    if tmp_dir is not None:
                        shutil.rmtree(tmp_dir)
    finally:
        hooks.cleanup()
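In this latest variant, kwargs carries the general option set while client_args holds only what the user passed explicitly; script.update_args(**client_args) produces per-script arguments, and the loop above then fills in any keys that are still missing. A small illustration of that fill-in step; the option names are made up and update_args itself is not shown in the example:

# args as returned by script.update_args(**client_args) -- contents illustrative.
args = {"flavor": "mobile-browser", "iterations": 5}
# General options collected by the runner.
kwargs = {"iterations": 1, "flavor": "desktop-browser", "verbose": False}

for key, value in kwargs.items():
    if key not in args:  # same rule as the runner uses above
        args[key] = value

assert args == {"flavor": "mobile-browser", "iterations": 5, "verbose": False}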