Code example #1
def test_perfherder_with_subunits():
    options = {
        "perfherder": True,
        "perfherder-stats": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [
            metric_fields("name:firstPaint,extraOptions:['option']"),
            metric_fields("name:resource,shouldAlert:True,unit:a-unit"),
        ],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    assert len(output["suites"]) == 1
    assert all([
        subtest["unit"] == "a-unit"
        for subtest in output["suites"][0]["subtests"]
        if "resource" in subtest["name"]
    ])
    assert all([
        subtest["unit"] == "ms"
        for subtest in output["suites"][0]["subtests"]
        if "firstPaint" in subtest["name"]
    ])
Code example #2
def test_perfherder_with_extra_options():
    options = {
        "perfherder": True,
        "perfherder-stats": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [
            metric_fields("name:firstPaint,extraOptions:['option']"),
            metric_fields("name:resource,extraOptions:['second-option']"),
        ],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    assert len(output["suites"]) == 1
    assert sorted(output["suites"][0]["extraOptions"]) == sorted(
        ["option", "second-option"]
    )
Code example #3
File: test_perfherder.py  Project: ruturajv/gecko-dev
def test_perfherder_metrics_filtering():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [metric_fields("I shouldn't match a metric")],
    }

    metrics, metadata, env = setup_env(options)
    metadata.clear_results()

    with temp_dir() as tmpdir:
        nodatajson = pathlib.Path(tmpdir, "nodata.json")
        with nodatajson.open("w") as f:
            json.dump({}, f)

        metadata.add_result({
            "results": str(nodatajson),
            "name": "browsertime"
        })

        with temp_dir() as output:
            env.set_arg("output", output)
            with metrics as m, silence():
                m(metadata)

            assert not pathlib.Path(output, "perfherder-data.json").exists()
Code example #4
def test_perfherder_exlude_stats():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [metric_fields("firstPaint")],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Check some numbers in our data
    assert len(output["suites"]) == 1
    assert len(output["suites"][0]["subtests"]) == 1
    assert output["suites"][0]["value"] > 0

    # Check if only firstPaint metric was obtained with 2 replicates
    assert len(output["suites"][0]["subtests"][0]["replicates"]) == 2
    assert (
        "browserScripts.timings.firstPaint"
        == output["suites"][0]["subtests"][0]["name"]
    )
Code example #5
def test_perfherder_split_by():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-app": "fenix",
        "perfherder-metrics": [metric_fields("firstPaint")],
        "perfherder-split-by": "browserScripts.pageinfo.url",
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Sanity check
    assert len(output["suites"]) == 1

    # We should have 2 subtests (1 per URL)
    assert len(output["suites"][0]["subtests"]) == 2

    # Check to make sure that they were properly split
    names = [subtest["name"] for subtest in output["suites"][0]["subtests"]]
    assert sorted(names) == [
        "browserScripts.timings.firstPaint https://www.mozilla.org/en-US/",
        "browserScripts.timings.firstPaint https://www.sitespeed.io/",
    ]
    for i in range(2):
        assert len(output["suites"][0]["subtests"][i]["replicates"]) == 1
Code example #6
def test_compare_to_success(notebook, stats):
    options = {
        "notebook-metrics": [metric_fields("firstPaint")],
        "notebook-prefix": "",
        "notebook-analysis": [],
        "notebook": True,
        "notebook-compare-to": [str(BT_DATA.parent)],
        "notebook-stats": stats,
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)

    args, kwargs = notebook.call_args_list[0]

    if not stats:
        assert len(kwargs["data"]) == 2
        assert kwargs["data"][0]["name"] == "browsertime- newest run"
        assert kwargs["data"][1]["name"] == "browsertime-results"
    else:
        assert any("statistics" in element["subtest"]
                   for element in kwargs["data"])

    notebook.assert_has_calls(
        [mock.call().post_to_iodide(["compare"], start_local_server=True)])
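
The `notebook` and `stats` arguments above are supplied by pytest fixtures/parametrization that this excerpt does not show. As a rough sketch, `notebook` is presumably a mock patched over the project's notebook class (so the test can inspect `call_args_list`), and `stats` a parametrized boolean; the patch target and parameter values below are hypothetical, not taken from the project:

import pytest
from unittest import mock

@pytest.fixture
def notebook():
    # Hypothetical patch target; the real fixture would patch whatever class
    # the metrics layer instantiates to post results to Iodide.
    with mock.patch("mozperftest.metrics.notebook.PerftestNotebook") as nb:
        yield nb

# Run the test once without statistics rows and once with them.
@pytest.mark.parametrize("stats", [False, True])
def test_compare_to_success(notebook, stats):
    ...
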
Code example #7
def test_perfherder():
    options = {
        "perfherder": True,
        "perfherder-stats": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [metric_fields("firstPaint")],
        "perfherder-timestamp": 1.0,
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Check some metadata
    assert output["application"]["name"] == "firefox"
    assert output["framework"]["name"] == "browsertime"
    assert output["pushTimestamp"] == 1.0

    # Check some numbers in our data
    assert len(output["suites"]) == 1
    assert len(output["suites"][0]["subtests"]) == 10
    assert output["suites"][0]["value"] > 0

    # Check if only firstPaint metrics were obtained
    for subtest in output["suites"][0]["subtests"]:
        assert "firstPaint" in subtest["name"]
Code example #8
def test_perfherder_logcat():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [metric_fields("TimeToDisplayed")],
    }

    metrics, metadata, env = setup_env(options)
    metadata.clear_results()

    def processor(groups):
        """Parses the time from a displayed time string into milliseconds."""
        return (float(groups[0]) * 1000) + float(groups[1])

    re_w_group = r".*Displayed.*org\.mozilla\.fennec_aurora.*\+([\d]+)s([\d]+)ms.*"
    metadata.add_result(
        {
            "results": str(HERE / "data" / "home_activity.txt"),
            "transformer": "LogCatTimeTransformer",
            "transformer-options": {
                "first-timestamp": re_w_group,
                "processor": processor,
                "transform-subtest-name": "TimeToDisplayed",
            },
            "name": "LogCat",
        }
    )

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m:  # , silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Check some metadata
    assert output["application"]["name"] == "firefox"
    assert output["framework"]["name"] == "browsertime"

    # Check some numbers in our data
    assert len(output["suites"]) == 1
    assert len(output["suites"][0]["subtests"]) == 1
    assert output["suites"][0]["value"] > 0

    # Check if only the TimeToDisplayed metric was obtained
    for subtest in output["suites"][0]["subtests"]:
        assert "TimeToDisplayed" in subtest["name"]
Code example #9
def test_perfherder_bad_app_name():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-app": "this is not an app",
        "perfherder-metrics": [metric_fields("firstPaint")],
    }

    metrics, metadata, env = setup_env(options)

    # This will raise an error because the options method
    # we use in tests skips the `choices` checks.
    with pytest.raises(jsonschema.ValidationError):
        with temp_file() as output:
            env.set_arg("output", output)
            with metrics as m, silence():
                m(metadata)
Code example #10
def test_compare_to_invalid_parameter(notebook, filepath):
    options = {
        "notebook-metrics": [metric_fields("firstPaint")],
        "notebook-prefix": "",
        "notebook-analysis": [],
        "notebook": True,
        "notebook-compare-to": [filepath],
    }

    metrics, metadata, env = setup_env(options)

    with pytest.raises(Exception) as einfo:
        with temp_file() as output:
            env.set_arg("output", output)
            with metrics as m, silence():
                m(metadata)

    if filepath == "invalidPath":
        assert "does not exist" in str(einfo.value)
    else:
        assert "not a directory" in str(einfo.value)
Code example #11
def test_perfherder_app_name():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-app": "fenix",
        "perfherder-metrics": [metric_fields("firstPaint")],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Make sure that application setting is correct
    assert output["application"]["name"] == "fenix"
    assert "version" not in output["application"]
Code example #12
def test_metric_fields_new_format():
    assert metric_fields("name:foo,extraOptions:bar") == {
        "name": "foo",
        "extraOptions": "bar",
    }
Code example #13
def test_metric_fields_old_format():
    assert metric_fields("firstPaint") == {"name": "firstPaint"}
Code example #14
def test_perfherder_simple_names():
    options = {
        "perfherder": True,
        "perfherder-stats": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [metric_fields("firstPaint"), metric_fields("resource")],
        "perfherder-simplify-names": True,
        "perfherder-simplify-exclude": ["statistics"],
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Check some metadata
    assert output["application"]["name"] == "firefox"
    assert output["framework"]["name"] == "browsertime"

    # Check some numbers in our data
    assert len(output["suites"]) == 1
    assert output["suites"][0]["value"] > 0

    # Check if only firstPaint/resource metrics were obtained and
    # that simplifications occurred
    assert all(
        [
            "firstPaint" in subtest["name"]
            or "duration" in subtest["name"]
            or "count" in subtest["name"]
            for subtest in output["suites"][0]["subtests"]
        ]
    )

    found_all = {"firstPaint": False, "count": False, "duration": False}
    for subtest in output["suites"][0]["subtests"]:
        if subtest["name"] in found_all:
            found_all[subtest["name"]] = True
            continue
        assert any([name in subtest["name"] for name in found_all.keys()])
        # Statistics are not simplified so any metric that isn't
        # in the list of known metrics must be a statistic
        assert "statistics" in subtest["name"]

    for entry, value in found_all.items():
        assert found_all[entry], f"Failed finding metric simplification for {entry}"

    # Statistics are not simplified by default
    assert (
        len(
            [
                subtest
                for subtest in output["suites"][0]["subtests"]
                if "statistics" in subtest["name"]
            ]
        )
        == 27
    )
    assert (
        len(
            [
                subtest
                for subtest in output["suites"][0]["subtests"]
                if "statistics" not in subtest["name"]
            ]
        )
        == 3
    )
Code example #15
def test_metric_fields_simple(metrics, expected):
    assert metric_fields(metrics) == expected
Code example #16
def test_metric_fields_complex(metrics, expected):
    assert metric_fields(metrics) == expected
Code example #17
def test_metric_fields_complex_failures(metrics):
    with pytest.raises(Exception):
        metric_fields(metrics)
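
The three tests above accept `metrics` (and `expected`) as arguments, which implies `pytest.mark.parametrize` decorators that this excerpt omits. A minimal sketch of how such parametrization could look, with illustrative cases only: the passing cases mirror code examples #12 and #13, while the failure inputs are assumptions rather than the project's actual test matrix:

import pytest

@pytest.mark.parametrize(
    "metrics,expected",
    [
        ("firstPaint", {"name": "firstPaint"}),
        ("name:foo,extraOptions:bar", {"name": "foo", "extraOptions": "bar"}),
    ],
)
def test_metric_fields_simple(metrics, expected):
    # Assumes metric_fields is imported from the module under test,
    # as in the excerpts above.
    assert metric_fields(metrics) == expected


@pytest.mark.parametrize("metrics", ["name:", "unknownField:foo"])
def test_metric_fields_complex_failures(metrics):
    # Hypothetical invalid inputs; which strings actually fail depends on
    # metric_fields' validation rules.
    with pytest.raises(Exception):
        metric_fields(metrics)
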
Code example #18
def test_perfherder_names_simplified_with_no_exclusions():
    options = {
        "perfherder": True,
        "perfherder-stats": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [metric_fields("firstPaint"), metric_fields("resource")],
        "perfherder-simplify-names": True,
    }

    metrics, metadata, env = setup_env(options)

    with temp_file() as output:
        env.set_arg("output", output)
        with metrics as m, silence():
            m(metadata)
        output_file = metadata.get_output()
        with open(output_file) as f:
            output = json.loads(f.read())

    # Check some metadata
    assert output["application"]["name"] == "firefox"
    assert output["framework"]["name"] == "browsertime"

    # Check some numbers in our data
    assert len(output["suites"]) == 1
    assert output["suites"][0]["value"] > 0

    # In this case, some metrics will be called "median", "mean", etc.
    # since those are the simplifications of the first statistics entries
    # that were found.
    assert not all(
        [
            "firstPaint" in subtest["name"]
            or "duration" in subtest["name"]
            or "count" in subtest["name"]
            for subtest in output["suites"][0]["subtests"]
        ]
    )

    found_all = {"firstPaint": False, "count": False, "duration": False}
    for subtest in output["suites"][0]["subtests"]:
        if subtest["name"] in found_all:
            found_all[subtest["name"]] = True
            continue

    for entry, value in found_all.items():
        assert found_all[entry], f"Failed finding metric simplification for {entry}"

    # Only a portion of the metrics should still have statistics in
    # their name due to a naming conflict that only emits a warning
    assert (
        len(
            [
                subtest
                for subtest in output["suites"][0]["subtests"]
                if "statistics" in subtest["name"]
            ]
        )
        == 18
    )
    assert (
        len(
            [
                subtest
                for subtest in output["suites"][0]["subtests"]
                if "statistics" not in subtest["name"]
            ]
        )
        == 12
    )