Example #1
def test_open_file():
    data = json.dumps({"1": 2})

    with temp_file(name="data.json", content=data) as f:
        res = open_file(f)
        assert res == {"1": 2}

    with temp_file(name="data.txt", content="yeah") as f:
        assert open_file(f) == "yeah"
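
The examples in this listing lean on a temp_file helper (and a similar temp_dir) from mozperftest's test support module. The sketch below is not that implementation, only an assumption about its shape: a context manager that writes optional content to a named file inside a throwaway directory and yields the file's path.

# Hypothetical stand-in for the temp_file helper used throughout these tests;
# the real helper lives in mozperftest's test support code.
import contextlib
import shutil
import tempfile
from pathlib import Path


@contextlib.contextmanager
def temp_file(name="temp", content=None):
    # Use a private directory so the file name can be chosen freely.
    tempdir = tempfile.mkdtemp()
    path = Path(tempdir, name)
    try:
        if content is not None:
            path.write_text(content)
        yield str(path)
    finally:
        shutil.rmtree(tempdir, ignore_errors=True)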
Example #2
def test_xpcshell_perfherder(*mocked):
    mach_cmd, metadata, env = get_running_env(flavor="xpcshell",
                                              perfherder=True,
                                              xpcshell_cycles=10)

    sys = env.layers[SYSTEM]
    xpcshell = env.layers[TEST]
    env.set_arg("tests", [str(EXAMPLE_XPCSHELL_TEST)])
    metrics = env.layers[METRICS]

    with temp_file() as output:
        env.set_arg("output", output)
        try:
            with sys as s, xpcshell as x, metrics as m:
                m(x(s(metadata)))
        finally:
            shutil.rmtree(mach_cmd._mach_context.state_dir)

        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Check some metadata
    assert output["application"]["name"] == "firefox"
    assert output["framework"]["name"] == "xpcshell"

    # Check some numbers in our data
    assert len(output["suites"]) == 1
    assert len(output["suites"][0]["subtests"]) == 3
    assert output["suites"][0]["value"] > 0

    for subtest in output["suites"][0]["subtests"]:
        assert subtest["name"].startswith("metrics")
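
In the xpcshell example above, m(x(s(metadata))) threads the metadata through the system, test, and metrics layers in order, while the with statement drives each layer's setup and teardown. Purely as an illustration of that calling pattern (this is not mozperftest's actual layer class), a layer can be modeled as an object that is both a context manager and a callable:

# Illustrative only: an object that sets up in __enter__, transforms a
# payload when called, and tears down in __exit__.
class EchoLayer:
    def __init__(self, name):
        self.name = name

    def __enter__(self):
        print(f"setting up {self.name}")
        return self

    def __exit__(self, *exc):
        print(f"tearing down {self.name}")

    def __call__(self, metadata):
        metadata.setdefault("layers", []).append(self.name)
        return metadata


with EchoLayer("system") as s, EchoLayer("test") as x, EchoLayer("metrics") as m:
    result = m(x(s({})))

assert result["layers"] == ["system", "test", "metrics"]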
Example #3
def test_perfherder_exclude_stats():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-metrics": ["firstPaint"],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Check some numbers in our data
    assert len(output["suites"]) == 1
    assert len(output["suites"][0]["subtests"]) == 1
    assert output["suites"][0]["value"] > 0

    # Check if only one firstPaint metric was obtained
    assert (output["suites"][0]["subtests"][0]["name"]
            == "browserScripts.timings.firstPaint")
Example #4
def test_perfherder():
    options = {
        "perfherder": True,
        "perfherder-stats": True,
        "perfherder-prefix": "",
        "perfherder-metrics": ["firstPaint"],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Check some metadata
    assert output["application"]["name"] == "firefox"
    assert output["framework"]["name"] == "browsertime"

    # Check some numbers in our data
    assert len(output["suites"]) == 1
    assert len(output["suites"][0]["subtests"]) == 10
    assert output["suites"][0]["value"] > 0

    # Check if only firstPaint metrics were obtained
    for subtest in output["suites"][0]["subtests"]:
        assert "firstPaint" in subtest["name"]
Example #5
def test_perfherder_split_by():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-app": "fenix",
        "perfherder-metrics": [metric_fields("firstPaint")],
        "perfherder-split-by": "browserScripts.pageinfo.url",
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Sanity check
    assert len(output["suites"]) == 1

    # We should have 2 subtests (1 per URL)
    assert len(output["suites"][0]["subtests"]) == 2

    # Check to make sure that they were properly split
    names = [subtest["name"] for subtest in output["suites"][0]["subtests"]]
    assert sorted(names) == [
        "browserScripts.timings.firstPaint https://www.mozilla.org/en-US/",
        "browserScripts.timings.firstPaint https://www.sitespeed.io/",
    ]
    for i in range(2):
        assert len(output["suites"][0]["subtests"][i]["replicates"]) == 1
Example #6
def test_notebookupload_with_filter(notebook, no_filter):

    options = {
        "notebook-metrics": [],
        "notebook-prefix": "",
        "notebook": True,
        "notebook-analysis": ["scatterplot"],
        "notebook-analyze-strings": no_filter,
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)

    if no_filter:
        args, kwargs = notebook.call_args_list[0]
        assert type(kwargs["data"][0]["data"][0]["value"]) == str
    else:
        for call in notebook.call_args_list:
            args, kwargs = call
            for a in args:
                for data_dict in a:
                    for data in data_dict["data"]:
                        assert type(data["value"]) in (int, float)

    notebook.assert_has_calls(
        [mock.call().post_to_iodide(["scatterplot"], start_local_server=True)])
Example #7
def test_perfherder_with_extra_options():
    options = {
        "perfherder": True,
        "perfherder-stats": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [
            metric_fields("name:firstPaint,extraOptions:['option']"),
            metric_fields("name:resource,extraOptions:['second-option']"),
        ],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    assert len(output["suites"]) == 1
    assert sorted(output["suites"][0]["extraOptions"]) == sorted(
        ["option", "second-option"]
    )
Example #8
def test_compare_to_success(notebook, stats):
    options = {
        "notebook-metrics": [metric_fields("firstPaint")],
        "notebook-prefix": "",
        "notebook-analysis": [],
        "notebook": True,
        "notebook-compare-to": [str(BT_DATA.parent)],
        "notebook-stats": stats,
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)

    args, kwargs = notebook.call_args_list[0]

    if not stats:
        assert len(kwargs["data"]) == 2
        assert kwargs["data"][0]["name"] == "browsertime- newest run"
        assert kwargs["data"][1]["name"] == "browsertime-results"
    else:
        assert any("statistics" in element["subtest"]
                   for element in kwargs["data"])

    notebook.assert_has_calls(
        [mock.call().post_to_iodide(["compare"], start_local_server=True)])
Example #9
def test_android_log(*mocked):
    with temp_file() as logcat, temp_dir() as output:
        args = {
            "flavor": "mobile-browser",
            "android-install-apk": ["this.apk"],
            "android": True,
            "console": True,
            "android-timeout": 30,
            "android-capture-adb": "stdout",
            "android-capture-logcat": logcat,
            "android-app-name": "org.mozilla.fenix",
            "androidlog": True,
            "output": output,
        }

        mach_cmd, metadata, env = get_running_env(**args)
        env.set_arg("tests", [EXAMPLE_TEST])

        with env.layers[SYSTEM] as sys, env.layers[TEST] as andro:
            metadata = andro(sys(metadata))

        # we want to drop the first result
        metadata._results = metadata._results[1:]
        with env.layers[METRICS] as metrics:
            metadata = metrics(metadata)

        assert pathlib.Path(output, "LogCatstd-output.json").exists()
Example #10
def test_visual_metrics(device):
    os.environ["VISUALMETRICS_PY"] = ""
    mach_cmd, metadata, env = get_running_env(
        visualmetrics=True,
        perfherder=True,
        verbose=True,
        tests=[EXAMPLE_TEST],
    )
    metrics = env.layers[METRICS]

    metadata.add_result({
        "results": str(BT_DATA_VIDEO.parent),
        "name": "browsertime"
    })

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m:
            metadata = m(metadata)

        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Check some metadata
    assert output["application"]["name"] == "firefox"
    visual_metrics = [i["name"] for i in output["suites"][1]["subtests"]]
    assert "VisualProgress96" in visual_metrics
Example #11
def test_perfherder_with_subunits():
    options = {
        "perfherder": True,
        "perfherder-stats": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [
            metric_fields("name:firstPaint,extraOptions:['option']"),
            metric_fields("name:resource,shouldAlert:True,unit:a-unit"),
        ],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    assert len(output["suites"]) == 1
    assert all([
        subtest["unit"] == "a-unit"
        for subtest in output["suites"][0]["subtests"]
        if "resource" in subtest["name"]
    ])
    assert all([
        subtest["unit"] == "ms" for subtest in output["suites"][0]["subtests"]
        if "firstPaint" in subtest["name"]
    ])
Example #12
def test_get_output_dir():
    with temp_file() as temp_dir:
        output_dir = get_output_dir(temp_dir)
        assert output_dir.exists()
        assert output_dir.is_dir()

        output_dir = get_output_dir(output=temp_dir, folder="artifacts")
        assert output_dir.exists()
        assert output_dir.is_dir()
        assert "artifacts" == output_dir.parts[-1]
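
A rough, assumption-based equivalent of the get_output_dir helper this test exercises: resolve the requested output location, optionally nest it under a subfolder, and create the directory if it does not exist. The real helper in mozperftest may differ in its details.

# Hypothetical sketch, not mozperftest's implementation.
from pathlib import Path


def get_output_dir(output, folder=None):
    output_dir = Path(output).resolve()
    if folder is not None:
        output_dir = output_dir / folder
    # Create the directory (and any missing parents) so callers can write to it.
    output_dir.mkdir(parents=True, exist_ok=True)
    return output_dir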
Example #13
def test_preview():
    content = Path(EXAMPLE_TEST)
    line = f"[bt][sometag] {content.name} in {content.parent}"
    test_objects = [{"path": str(content)}]
    cache = Path(Path.home(), ".mozbuild", ".perftestfuzzy")
    with cache.open("w") as f:
        f.write(json.dumps(test_objects))

    with temp_file(content=str(line)) as tasklist, silence() as out:
        main(args=["-t", tasklist])

    stdout, __ = out
    stdout.seek(0)
    assert ":owner: Performance Testing Team" in stdout.read()
Example #14
def test_perfherder_logcat():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [metric_fields("TimeToDisplayed")],
    }

    metrics, metadata, env = setup_env(options)
    metadata.clear_results()

    def processor(groups):
        """Parses the time from a displayed time string into milliseconds."""
        return (float(groups[0]) * 1000) + float(groups[1])

    re_w_group = r".*Displayed.*org\.mozilla\.fennec_aurora.*\+([\d]+)s([\d]+)ms.*"
    metadata.add_result(
        {
            "results": str(HERE / "data" / "home_activity.txt"),
            "transformer": "LogCatTimeTransformer",
            "transformer-options": {
                "first-timestamp": re_w_group,
                "processor": processor,
                "transform-subtest-name": "TimeToDisplayed",
            },
            "name": "LogCat",
        }
    )

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m:
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Check some metadata
    assert output["application"]["name"] == "firefox"
    assert output["framework"]["name"] == "browsertime"

    # Check some numbers in our data
    assert len(output["suites"]) == 1
    assert len(output["suites"][0]["subtests"]) == 1
    assert output["suites"][0]["value"] > 0

    # Check if only the TimeToDisplayed metric was obtained
    for subtest in output["suites"][0]["subtests"]:
        assert "TimeToDisplayed" in subtest["name"]
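
The transformer options above pair a regular expression with a processor that folds the captured seconds and milliseconds into a single millisecond value. A quick sanity check of that arithmetic (the log line here is made up rather than taken from home_activity.txt):

import re

# Hypothetical logcat line shaped like the ones the transformer matches.
line = "ActivityManager: Displayed org.mozilla.fennec_aurora/.App: +1s253ms"
re_w_group = r".*Displayed.*org\.mozilla\.fennec_aurora.*\+([\d]+)s([\d]+)ms.*"

groups = re.match(re_w_group, line).groups()
assert groups == ("1", "253")
# processor(groups) == 1 * 1000 + 253
assert (float(groups[0]) * 1000) + float(groups[1]) == 1253.0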
Example #15
def test_android_log_adb():
    with temp_file() as log_adb:
        args = {
            "android-install-apk": ["gve_nightly_api16"],
            "android": True,
            "android-timeout": 60,
            "android-app-name": "org.mozilla.geckoview_example",
            "android-capture-adb": log_adb,
        }

        mach_cmd, metadata, env = get_running_env(**args)
        system = env.layers[SYSTEM]
        with system as android, silence(system), pytest.raises(DeviceError):
            android(metadata)
        with open(log_adb) as f:
            assert "DEBUG ADBLoggedDevice" in f.read()
Example #16
def test_perfherder_bad_app_name():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-app": "this is not an app",
        "perfherder-metrics": ["firstPaint"],
    }

    metrics, metadata, env = setup_env(options)

    # This will raise an error because the options method
    # we use in tests skips the `choices` checks.
    with pytest.raises(jsonschema.ValidationError):
        with temp_file() as output:
            env.set_arg("output", output)
            with metrics as m, silence():
                m(metadata)
Example #17
def test_perfherder_missing_data_failure():
    options = {"perfherder": True, "perfherder-prefix": ""}

    metrics, metadata, env = setup_env(options)
    metadata.clear_results()

    with temp_dir() as tmpdir:
        nodatajson = pathlib.Path(tmpdir, "baddata.json")
        with nodatajson.open("w") as f:
            json.dump({"bad data": "here"}, f)

        metadata.add_result({"results": str(nodatajson), "name": "browsertime"})

        with pytest.raises(PerfherderValidDataError):
            with temp_file() as output:
                env.set_arg("output", output)
                with metrics as m, silence():
                    m(metadata)
Example #18
def test_compare_to_invalid_parameter(notebook, filepath):
    options = {
        "notebook-metrics": [metric_fields("firstPaint")],
        "notebook-prefix": "",
        "notebook-analysis": [],
        "notebook": True,
        "notebook-compare-to": [filepath],
    }

    metrics, metadata, env = setup_env(options)

    with pytest.raises(Exception) as einfo:
        with temp_file() as output:
            env.set_arg("output", output)
            with metrics as m, silence():
                m(metadata)

    if filepath == "invalidPath":
        assert "does not exist" in str(einfo.value)
    else:
        assert "not a directory" in str(einfo.value)
Example #19
def test_perfherder_app_name():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-app": "fenix",
        "perfherder-metrics": ["firstPaint"],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Make sure that application setting is correct
    assert output["application"]["name"] == "fenix"
    assert "version" not in output["application"]
Example #20
def test_android_log_cat(device):
    with temp_file() as log_cat:
        args = {
            "android-install-apk": ["gve_nightly_api16"],
            "android": True,
            "android-timeout": 60,
            "android-app-name": "org.mozilla.geckoview_example",
            "android-capture-logcat": log_cat,
            "android-clear-logcat": True,
            "android-capture-adb": "stdout",
        }

        mach_cmd, metadata, env = get_running_env(**args)
        system = env.layers[SYSTEM]
        andro = system.layers[0]

        with system as layer, silence(system):
            andro.device = device
            andro.device.get_logcat = mock.Mock(return_value=[])
            layer(metadata)

        andro.device.get_logcat.assert_called()
        andro.device.clear_logcat.assert_called()
Example #21
def test_perfherder_names_simplified_with_no_exclusions():
    options = {
        "perfherder": True,
        "perfherder-stats": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [metric_fields("firstPaint"), metric_fields("resource")],
        "perfherder-simplify-names": True,
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Check some metadata
    assert output["application"]["name"] == "firefox"
    assert output["framework"]["name"] == "browsertime"

    # Check some numbers in our data
    assert len(output["suites"]) == 1
    assert output["suites"][0]["value"] > 0

    # In this case, some metrics will be called "median", "mean", etc.
    # since those are the simplifications of the first statistics entries
    # that were found.
    assert not all(
        [
            "firstPaint" in subtest["name"]
            or "duration" in subtest["name"]
            or "count" in subtest["name"]
            for subtest in output["suites"][0]["subtests"]
        ]
    )

    found_all = {"firstPaint": False, "count": False, "duration": False}
    for subtest in output["suites"][0]["subtests"]:
        if subtest["name"] in found_all:
            found_all[subtest["name"]] = True

    for entry, value in found_all.items():
        assert value, f"Failed finding metric simplification for {entry}"

    # Only a portion of the metrics should still have statistics in
    # their name due to a naming conflict that only emits a warning
    assert (
        len(
            [
                subtest
                for subtest in output["suites"][0]["subtests"]
                if "statistics" in subtest["name"]
            ]
        )
        == 18
    )
    assert (
        len(
            [
                subtest
                for subtest in output["suites"][0]["subtests"]
                if "statistics" not in subtest["name"]
            ]
        )
        == 12
    )
Example #22
def test_perfherder_simple_names():
    options = {
        "perfherder": True,
        "perfherder-stats": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [metric_fields("firstPaint"), metric_fields("resource")],
        "perfherder-simplify-names": True,
        "perfherder-simplify-exclude": ["statistics"],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Check some metadata
    assert output["application"]["name"] == "firefox"
    assert output["framework"]["name"] == "browsertime"

    # Check some numbers in our data
    assert len(output["suites"]) == 1
    assert output["suites"][0]["value"] > 0

    # Check if only firstPaint/resource metrics were obtained and
    # that simplifications occurred
    assert all(
        [
            "firstPaint" in subtest["name"]
            or "duration" in subtest["name"]
            or "count" in subtest["name"]
            for subtest in output["suites"][0]["subtests"]
        ]
    )

    found_all = {"firstPaint": False, "count": False, "duration": False}
    for subtest in output["suites"][0]["subtests"]:
        if subtest["name"] in found_all:
            found_all[subtest["name"]] = True
            continue
        assert any([name in subtest["name"] for name in found_all.keys()])
        # Statistics are not simplified so any metric that isn't
        # in the list of known metrics must be a statistic
        assert "statistics" in subtest["name"]

    for entry, value in found_all.items():
        assert found_all[entry], f"Failed finding metric simplification for {entry}"

    # Statistics are not simplified by default
    assert (
        len(
            [
                subtest
                for subtest in output["suites"][0]["subtests"]
                if "statistics" in subtest["name"]
            ]
        )
        == 27
    )
    assert (
        len(
            [
                subtest
                for subtest in output["suites"][0]["subtests"]
                if "statistics" not in subtest["name"]
            ]
        )
        == 3
    )
Example #23
def test_download_file_success():
    with temp_file() as target:
        download_file("http://content", Path(target), retry_sleep=0.1)
        with open(target) as f:
            assert f.read() == "some content"
Example #24
def test_download_file_fails():
    with temp_file() as target, silence(), pytest.raises(Exception):
        download_file("http://I don't exist", Path(target), retry_sleep=0.1)
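
The last two examples exercise a download_file(url, target, retry_sleep=...) helper that retries a few times before giving up. The sketch below is not mozperftest's implementation, just one way a helper with that call shape could look, assuming the requests library is available.

# Hypothetical retrying downloader with the same call shape as the helper
# under test.
import time
from pathlib import Path

import requests


def download_file(url, target: Path, retries=3, retry_sleep=10):
    last_error = None
    for _ in range(retries):
        try:
            response = requests.get(url, timeout=30)
            response.raise_for_status()
            target.write_bytes(response.content)
            return target
        except requests.RequestException as error:
            last_error = error
            time.sleep(retry_sleep)
    raise last_error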