Example #1
def test_compare_to_success(notebook, stats):
    options = {
        "notebook-metrics": [metric_fields("firstPaint")],
        "notebook-prefix": "",
        "notebook-analysis": [],
        "notebook": True,
        "notebook-compare-to": [str(BT_DATA.parent)],
        "notebook-stats": stats,
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)

    args, kwargs = notebook.call_args_list[0]

    if not stats:
        assert len(kwargs["data"]) == 2
        assert kwargs["data"][0]["name"] == "browsertime- newest run"
        assert kwargs["data"][1]["name"] == "browsertime-results"
    else:
        assert any("statistics" in element["subtest"]
                   for element in kwargs["data"])

    notebook.assert_has_calls(
        [mock.call().post_to_iodide(["compare"], start_local_server=True)])
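Every snippet on this page exercises mozperftest's silence() helper. Judging only from how the tests use it (bare silence(), silence(layer) with a layer or command object, and "with silence() as captured" unpacking into seekable stdout/stderr buffers), a minimal sketch of that shape could look like the following. This is an illustration only, not the actual mozperftest.utils.silence implementation, and the optional argument is simply ignored here.

import contextlib
import io


@contextlib.contextmanager
def silence(layer=None):
    # Capture stdout/stderr into in-memory buffers and hand them back so the
    # caller can rewind and inspect them, as the tests on this page do. The
    # real helper presumably also quiets the logger of the optional `layer`
    # argument; that part is omitted in this sketch.
    stdout, stderr = io.StringIO(), io.StringIO()
    with contextlib.redirect_stdout(stdout), contextlib.redirect_stderr(stderr):
        yield stdout, stderr


# Usage mirroring the buffer checks in the later examples
# (e.g. test_checkout_python_script).
with silence() as (out, _):
    print("=> lib2to3 [OK]")
out.seek(0)
assert out.read() == "=> lib2to3 [OK]\n"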
Example #2
    def run(self, metadata):
        tests = self.get_arg("tests", [])
        if len(tests) != 1:
            # for now we support one single test
            raise NotImplementedError(str(tests))

        test = Path(tests[0])
        if not test.exists():
            raise FileNotFoundError(str(test))

        # let's grab the manifest
        manifest = Path(test.parent, "xpcshell.ini")
        if not manifest.exists():
            raise FileNotFoundError(str(manifest))

        import runxpcshelltests

        xpcshell = runxpcshelltests.XPCShellTests(log=self)
        kwargs = {}
        kwargs["testPaths"] = test.name
        kwargs["verbose"] = True
        kwargs["xpcshell"] = self.mach_cmd.get_binary_path("xpcshell")
        kwargs["mozInfo"] = str(Path(self.topobjdir, "mozinfo.json"))
        kwargs["symbolsPath"] = str(Path(self.distdir, "crashreporter-symbols"))
        kwargs["logfiles"] = True
        kwargs["profileName"] = "firefox"
        kwargs["pluginsPath"] = str(Path(self.distdir, "plugins"))
        kwargs["testingModulesDir"] = str(Path(self.topobjdir, "_tests/modules"))
        kwargs["utility_path"] = self.bindir
        kwargs["manifest"] = manifest
        kwargs["totalChunks"] = 1
        cycles = self.get_arg("cycles", 1)
        self.info("Running %d cycles" % cycles)

        for cycle in range(cycles):
            with temp_dir() as tmp, silence():
                kwargs["tempDir"] = tmp
                if not xpcshell.runTests(kwargs):
                    raise XPCShellTestError()
            self.info("Cycle %d" % (cycle + 1))
        self.info("tests done.")

        results = defaultdict(list)
        for m in self.metrics:
            for key, val in m.items():
                results[key].append(val)

        metadata.add_result(
            {
                "name": test.name,
                "framework": {"name": "xpcshell"},
                "transformer": "mozperftest.test.xpcshell:XPCShellData",
                "results": [
                    {"values": measures, "name": subtest}
                    for subtest, measures in results.items()
                ],
            }
        )

        return metadata
Example #3
def test_replay_url(killer, running_env):
    mach_cmd, metadata, env = running_env
    system = env.layers[SYSTEM]
    env.set_arg("proxy-replay", "http://example.dump")

    with system as proxy, silence():
        proxy(metadata)
Example #4
def test_notebookupload_with_filter(notebook, no_filter):

    options = {
        "notebook-metrics": [],
        "notebook-prefix": "",
        "notebook": True,
        "notebook-analysis": ["scatterplot"],
        "notebook-analyze-strings": no_filter,
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)

    if no_filter:
        args, kwargs = notebook.call_args_list[0]
        assert type(kwargs["data"][0]["data"][0]["value"]) == str
    else:
        for call in notebook.call_args_list:
            args, kwargs = call
            for a in args:
                for data_dict in a:
                    for data in data_dict["data"]:
                        assert type(data["value"]) in (int, float)

    notebook.assert_has_calls(
        [mock.call().post_to_iodide(["scatterplot"], start_local_server=True)])
Example #5
def test_run_python_script_failed(*mocked):
    with _get_command(PerftestTests) as test, silence(test) as captured:
        test._run_python_script("nothing")

    stdout, stderr = captured
    stdout.seek(0)
    assert stdout.read().endswith("[FAILED]\n")
Example #6
def test_browser_desktop(*mocked):
    mach_cmd, metadata, env = get_running_env(
        browsertime_iterations=1,
        browsertime_extra_options="one=1,two=2",
        tests=[EXAMPLE_TEST],
        browsertime_no_window_recorder=False,
        browsertime_viewport_size="1234x567",
    )
    browser = env.layers[TEST]
    sys = env.layers[SYSTEM]

    try:
        with sys as s, browser as b, silence():
            # just checking that the setup_helper property gets
            # correctly initialized
            browsertime = browser.layers[-1]
            assert browsertime.setup_helper is not None
            helper = browsertime.setup_helper
            assert browsertime.setup_helper is helper

            b(s(metadata))
    finally:
        shutil.rmtree(mach_cmd._mach_context.state_dir)

    assert mach_cmd.run_process.call_count == 1
    cmd = " ".join(mach_cmd.run_process.call_args[0][0])
    # check that --firefox.binaryPath is set automatically
    assert "--firefox.binaryPath" in cmd
Example #7
def test_proxy(install_mozproxy):
    mach_cmd, metadata, env = get_running_env(proxy=True)
    system = env.layers[SYSTEM]

    # XXX this will run for real, we need to mock HTTP calls
    with system as proxy, silence():
        proxy(metadata)
Example #8
def test_checkout_python_script():
    with silence() as captured:
        assert checkout_python_script(_Venv(), "lib2to3", ["--help"])

    stdout, stderr = captured
    stdout.seek(0)
    assert stdout.read() == "=> lib2to3 [OK]\n"
Example #9
def test_perfherder_with_extra_options():
    options = {
        "perfherder": True,
        "perfherder-stats": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [
            metric_fields("name:firstPaint,extraOptions:['option']"),
            metric_fields("name:resource,extraOptions:['second-option']"),
        ],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    assert len(output["suites"]) == 1
    assert sorted(output["suites"][0]["extraOptions"]) == sorted(
        ["option", "second-option"]
    )
Example #10
def test_perfherder_split_by():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-app": "fenix",
        "perfherder-metrics": [metric_fields("firstPaint")],
        "perfherder-split-by": "browserScripts.pageinfo.url",
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Sanity check
    assert len(output["suites"]) == 1

    # We should have 2 subtests (1 per URL)
    assert len(output["suites"][0]["subtests"]) == 2

    # Check to make sure that they were properly split
    names = [subtest["name"] for subtest in output["suites"][0]["subtests"]]
    assert sorted(names) == [
        "browserScripts.timings.firstPaint https://www.mozilla.org/en-US/",
        "browserScripts.timings.firstPaint https://www.sitespeed.io/",
    ]
    for i in range(2):
        assert len(output["suites"][0]["subtests"][i]["replicates"]) == 1
Example #11
def test_perfherder_with_subunits():
    options = {
        "perfherder":
        True,
        "perfherder-stats":
        True,
        "perfherder-prefix":
        "",
        "perfherder-metrics": [
            metric_fields("name:firstPaint,extraOptions:['option']"),
            metric_fields("name:resource,shouldAlert:True,unit:a-unit"),
        ],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    assert len(output["suites"]) == 1
    assert all([
        subtest["unit"] == "a-unit"
        for subtest in output["suites"][0]["subtests"]
        if "resource" in subtest["name"]
    ])
    assert all([
        subtest["unit"] == "ms"
        for subtest in output["suites"][0]["subtests"]
        if "firstPaint" in subtest["name"]
    ])
Example #12
def test_perfherder_metrics_filtering():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-metrics": ["I shouldn't match a metric"],
    }

    metrics, metadata, env = setup_env(options)
    metadata.clear_results()

    with temp_dir() as tmpdir:
        nodatajson = pathlib.Path(tmpdir, "nodata.json")
        with nodatajson.open("w") as f:
            json.dump({}, f)

        metadata.add_result({
            "results": str(nodatajson),
            "name": "browsertime"
        })

        with temp_dir() as output:
            env.set_arg("output", output)
            with metrics as m, silence():
                m(metadata)

            assert not pathlib.Path(output, "perfherder-data.json").exists()
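The Perfherder examples all route output through temp_file() and temp_dir(). As a rough sketch of the contract those helpers appear to satisfy in these tests (a throwaway path that goes away when the block exits), assuming the real mozperftest.utils versions may create the file eagerly or take different arguments; the `name` parameter below is purely illustrative.

import contextlib
import pathlib
import tempfile


@contextlib.contextmanager
def temp_dir():
    # Yield a fresh directory that disappears when the with-block exits.
    with tempfile.TemporaryDirectory() as path:
        yield path


@contextlib.contextmanager
def temp_file(name="output.json"):
    # Yield a path inside a throwaway directory; the tests above only pass
    # the path along (e.g. as the "output" argument), so nothing is written
    # here. The `name` default is a placeholder, not the real signature.
    with temp_dir() as base:
        yield str(pathlib.Path(base, name))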
Example #13
def test_perfherder():
    options = {
        "perfherder": True,
        "perfherder-stats": True,
        "perfherder-prefix": "",
        "perfherder-metrics": ["firstPaint"],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Check some metadata
    assert output["application"]["name"] == "firefox"
    assert output["framework"]["name"] == "browsertime"

    # Check some numbers in our data
    assert len(output["suites"]) == 1
    assert len(output["suites"][0]["subtests"]) == 10
    assert output["suites"][0]["value"] > 0

    # Check if only firstPaint metrics were obtained
    for subtest in output["suites"][0]["subtests"]:
        assert "firstPaint" in subtest["name"]
Example #14
def test_run_python_script(*mocked):
    with _get_command(PerftestTests) as test, silence(test) as captured:
        test._run_python_script("lib2to3", *["--help"])

    stdout, stderr = captured
    stdout.seek(0)
    assert stdout.read() == "=> lib2to3 [OK]\n"
Example #15
def test_browser(*mocked):
    mach_cmd, metadata, env = get_running_env(
        android=True,
        android_app_name="something",
        browsertime_geckodriver="GECKODRIVER",
        browsertime_iterations=1,
        browsertime_extra_options="one=1,two=2",
    )

    browser = env.layers[BROWSER]
    env.set_arg("tests", [EXAMPLE_TEST])

    try:
        with browser as b, silence():
            b(metadata)
    finally:
        shutil.rmtree(mach_cmd._mach_context.state_dir)
    assert mach_cmd.run_process.call_count == 1

    # Make sure all arguments are of type str
    for option in mach_cmd.run_process.call_args[0][0]:
        assert isinstance(option, str)

    cmd = " ".join(mach_cmd.run_process.call_args[0][0])
    assert EXAMPLE_TEST in cmd
    assert "--firefox.geckodriverPath GECKODRIVER" in cmd
    assert "--one 1" in cmd
    assert "--two 2" in cmd

    results = metadata.get_results()
    assert len(results) == 1
    assert set(list(results[0].keys())) - set(["name", "results"]) == set()
    assert results[0]["name"] == "Example"
Example #16
def test_perfherder_exlude_stats():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-metrics": ["firstPaint"],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Check some numbers in our data
    assert len(output["suites"]) == 1
    assert len(output["suites"][0]["subtests"]) == 1
    assert output["suites"][0]["value"] > 0

    # Check if only one firstPaint metric was obtained
    assert ("browserScripts.timings.firstPaint" == output["suites"][0]
            ["subtests"][0]["name"])
Example #17
def test_run_python_script_failed():
    with silence() as captured:
        assert not checkout_python_script(_Venv(), "nothing")

    stdout, stderr = captured
    stdout.seek(0)
    assert stdout.read().endswith("[FAILED]\n")
Example #18
def test_playback_no_file(killer):
    mach_cmd, metadata, env = running_env()
    system = env.layers[SYSTEM]
    env.set_arg("proxy-mode", "playback")

    with system as proxy, pytest.raises(ValueError) as excinfo, silence():
        proxy(metadata)
    assert "Proxy file not provided!!" in str(excinfo.value)
Example #19
def test_hooks_state(venv, env):
    kwargs = {
        "tests": [EXAMPLE_TEST],
        "hooks": STATE_HOOKS,
        "flavor": "desktop-browser",
    }
    with _get_command() as test, silence(test):
        test.run_perftest(**kwargs)
Example #20
def test_hooks_state(venv, env):
    kwargs = {
        "tests": [EXAMPLE_TEST],
        "hooks": STATE_HOOKS,
        "flavor": "desktop-browser",
    }
    with _get_command() as (cmd, command_context), silence(command_context):
        cmd(command_context, **kwargs)
Example #21
def test_push_command(push_to_try, venv):
    with _get_command() as test, silence(test):
        test.run_perftest(
            tests=[EXAMPLE_TEST],
            flavor="desktop-browser",
            push_to_try=True,
            try_platform="g5",
        )
        push_to_try.assert_called()
Example #22
def test_test_runner(*mocked):
    # simulating on try to run the paths parser
    old = mach_commands.ON_TRY
    mach_commands.ON_TRY = True
    with _get_command(PerftestTests) as test, silence(test), temporary_env(
            MOZ_AUTOMATION="1"):
        test.run_tests(tests=[EXAMPLE_TESTS_DIR])

    mach_commands.ON_TRY = old
Example #23
def test_record(killer, running_env):
    mach_cmd, metadata, env = running_env
    system = env.layers[SYSTEM]
    with tempfile.TemporaryDirectory() as tmpdir:
        recording = os.path.join(tmpdir, "recording.dump")
        env.set_arg("proxy-record", recording)

        with system as proxy, silence():
            proxy(metadata)
Example #24
def test_push_command(push_to_try, venv):
    with _get_command() as (cmd, command_context), silence(command_context):
        cmd(
            command_context,
            tests=[EXAMPLE_TEST],
            flavor="desktop-browser",
            push_to_try=True,
            try_platform="g5",
        )
        push_to_try.assert_called()
Example #25
def test_command_iterations(venv, env):
    kwargs = {
        "tests": [EXAMPLE_TEST],
        "hooks": ITERATION_HOOKS,
        "flavor": "desktop-browser",
    }
    with _get_command() as (cmd, command_context), silence(command_context):
        cmd(command_context, **kwargs)
        # the hook changes the iteration value to 5.
        # each iteration generates 5 calls, so we want to see 25
        assert len(env.mock_calls) == 25
Example #26
def test_replay_url(killer):
    mach_cmd, metadata, env = running_env()
    system = env.layers[SYSTEM]
    env.set_arg("proxy-mode", "playback")
    env.set_arg("proxy-file", "http://example.dump")

    with system as proxy, silence():
        proxy(metadata)

    browser_prefs = metadata.get_options("browser_prefs")
    assert browser_prefs["network.proxy.http_port"] == 1234
Example #27
def test_replay_url(install_mozproxy):
    mach_cmd, metadata, env = get_running_env(proxy=True)
    system = env.layers[SYSTEM]
    env.set_arg("proxy-replay", "http://example.dump")

    # XXX this will run for real, we need to mock HTTP calls
    with system as proxy, silence():
        proxy(metadata)
        # Give mitmproxy a bit of time to start up so we can verify that it's
        # actually running before we tear things down.
        time.sleep(5)
Example #28
def test_record(install_mozproxy):
    mach_cmd, metadata, env = get_running_env(proxy=True)
    system = env.layers[SYSTEM]
    with tempfile.TemporaryDirectory() as tmpdir:
        recording = os.path.join(tmpdir, "recording.dump")
        env.set_arg("proxy-record", recording)

        # XXX this will run for real, we need to mock HTTP calls
        with system as proxy, silence():
            proxy(metadata)
        assert os.path.exists(recording)
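The XXX notes in Examples #27 and #28 point out that these proxy tests hit the network for real. One hedged way to stub that out is to patch whatever object the SYSTEM layer uses to drive mitmproxy. The dotted path below is a guess and must be checked against the actual module layout, and the sketch reuses the helpers (get_running_env, SYSTEM, silence) already imported by this test module.

from unittest import mock


def test_record_mocked(install_mozproxy):
    # Hypothetical patch target -- replace with the real class or function
    # that the system proxy layer instantiates to start mitmproxy.
    with mock.patch("mozperftest.system.proxy.ProxyRunner") as runner:
        mach_cmd, metadata, env = get_running_env(proxy=True)
        system = env.layers[SYSTEM]
        env.set_arg("proxy-record", "recording.dump")

        with system as proxy, silence():
            proxy(metadata)

    # No real HTTP traffic: we only assert that the (mocked) runner was used.
    runner.assert_called()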
Example #29
def test_android():
    args = {
        "android-install-apk": ["this"],
        "android": True,
        "android-app-name": "org.mozilla.fenix",
    }

    mach_cmd, metadata, env = get_running_env(**args)
    system = env.layers[SYSTEM]
    with system as android, silence(system):
        android(metadata)
Example #30
def test_install_url_bad(*mocked):
    mach, metadata, env = get_running_env(browsertime_install_url="meh")
    browser = env.layers[BROWSER]
    env.set_arg("tests", [EXAMPLE_TEST])

    with pytest.raises(ValueError):
        try:
            with browser as b, silence():
                b(metadata)
        finally:
            shutil.rmtree(mach._mach_context.state_dir)