Example #1
def test_perfherder_metrics_filtering():
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-metrics": ["I shouldn't match a metric"],
    }

    metrics, metadata, env = setup_env(options)
    metadata.clear_results()

    with temp_dir() as tmpdir:
        nodatajson = pathlib.Path(tmpdir, "nodata.json")
        with nodatajson.open("w") as f:
            json.dump({}, f)

        metadata.add_result({
            "results": str(nodatajson),
            "name": "browsertime"
        })

        with temp_dir() as output:
            env.set_arg("output", output)
            with metrics as m, silence():
                m(metadata)

            # The metrics filter matched nothing, so no perfherder data
            # file should have been produced.
            assert not pathlib.Path(output, "perfherder-data.json").exists()
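
The examples on this page lean on small test helpers such as temp_dir() and silence(). As a rough sketch only (the real helpers live in mozperftest's test support code and may differ in detail), they can be built with contextlib along these lines:

import contextlib
import io
import shutil
import sys
import tempfile


@contextlib.contextmanager
def temp_dir():
    # Yield a fresh directory path and remove it when the block exits.
    tmp = tempfile.mkdtemp()
    try:
        yield tmp
    finally:
        shutil.rmtree(tmp, ignore_errors=True)


@contextlib.contextmanager
def silence():
    # Redirect stdout/stderr to throwaway buffers for the duration of
    # the block so noisy layers don't clutter the test output.
    old_out, old_err = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = io.StringIO(), io.StringIO()
    try:
        yield
    finally:
        sys.stdout, sys.stderr = old_out, old_err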
Example #2
def files(data):
    # Create a temporary directory; keep everything inside the context
    # manager so the directory is cleaned up once the fixture is finished.
    with temp_dir() as td:
        tmp_path = pathlib.Path(td)

        dirs = {
            "resources": tmp_path / "resources",
            "output": tmp_path / "output",
        }

        for d in dirs.values():
            d.mkdir(parents=True, exist_ok=True)

        # Create temporary data files for tests.
        def _create_temp_files(path, data):
            path.touch(exist_ok=True)
            path.write_text(data)
            return path.resolve().as_posix()

        resources = {}
        json_1 = dirs["resources"] / "file_1.json"
        resources["file_1"] = _create_temp_files(json_1,
                                                 json.dumps(data["data_1"]))

        json_2 = dirs["resources"] / "file_2.json"
        resources["file_2"] = _create_temp_files(json_2,
                                                 json.dumps(data["data_2"]))

        txt_3 = dirs["resources"] / "file_3.txt"
        resources["file_3"] = _create_temp_files(txt_3, str(data["data_3"]))

        output = dirs["output"] / "output.json"

        yield resources, dirs, output.resolve().as_posix()
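
The generator above has the shape of a pytest fixture. A minimal usage sketch, assuming it is registered with pytest.fixture and that a hypothetical data fixture supplies the data_1/data_2/data_3 entries:

import json
import pathlib

import pytest


@pytest.fixture
def data():
    # Hypothetical payload; the real tests supply their own values.
    return {"data_1": {"a": 1}, "data_2": {"b": 2}, "data_3": "some text"}


@pytest.fixture
def temp_files(data):
    # Reuse the generator above as the fixture body.
    yield from files(data)


def test_resources_written(temp_files):
    resources, dirs, output = temp_files
    # file_1 was serialized with json.dumps, so it round-trips via json.loads.
    content = pathlib.Path(resources["file_1"]).read_text()
    assert json.loads(content) == {"a": 1}
    assert pathlib.Path(output).name == "output.json"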
Example #3
def test_android_log(*mocked):
    with temp_file() as logcat, temp_dir() as output:
        args = {
            "flavor": "mobile-browser",
            "android-install-apk": ["this.apk"],
            "android": True,
            "console": True,
            "android-timeout": 30,
            "android-capture-adb": "stdout",
            "android-capture-logcat": logcat,
            "android-app-name": "org.mozilla.fenix",
            "androidlog": True,
            "output": output,
        }

        mach_cmd, metadata, env = get_running_env(**args)
        env.set_arg("tests", [EXAMPLE_TEST])

        with env.layers[SYSTEM] as sys, env.layers[TEST] as andro:
            metadata = sys(andro(metadata))

        # we want to drop the first result
        metadata._results = metadata._results[1:]
        with env.layers[METRICS] as metrics:
            metadata = metrics(metadata)

        # The androidlog metrics layer should have written the processed
        # logcat data into the output directory.
        assert pathlib.Path(output, "LogCatstd-output.json").exists()
Example #4
def test_perfherder_validation_failure():
    options = {"perfherder": True, "perfherder-prefix": ""}

    metrics, metadata, env = setup_env(options)

    # Perfherder schema has limits on min/max data values. Having
    # no metrics in the options will cause a failure because of the
    # timestamps that are picked up from browsertime.
    with pytest.raises(jsonschema.ValidationError):
        with temp_dir() as output:
            env.set_arg("output", output)
            with metrics as m, silence():
                m(metadata)
Example #5
def test_perfherder_missing_data_failure():
    options = {"perfherder": True, "perfherder-prefix": ""}

    metrics, metadata, env = setup_env(options)
    metadata.clear_results()

    with temp_dir() as tmpdir:
        nodatajson = pathlib.Path(tmpdir, "baddata.json")
        with nodatajson.open("w") as f:
            json.dump({"bad data": "here"}, f)

        metadata.add_result({
            "results": str(nodatajson),
            "name": "browsertime"
        })

        # The results file holds no data that perfherder can use, so the
        # layer should raise PerfherderValidDataError.
        with pytest.raises(PerfherderValidDataError):
            with temp_file() as output:
                env.set_arg("output", output)
                with metrics as m, silence():
                    m(metadata)
Example #6
def test_console_output(*mocked):
    with temp_dir() as tempdir:
        options = {
            "console-prefix": "",
            "console": True,
            "output": tempdir,
        }
        mach_cmd, metadata, env = get_running_env(**options)
        runs = []

        # Stub out run_process so any subprocess calls made by the console
        # layer are recorded rather than executed.
        def _run_process(*args, **kw):
            runs.append((args, kw))

        mach_cmd.run_process = _run_process
        metrics = env.layers[METRICS]
        env.set_arg("tests", [EXAMPLE_TEST])
        res = {"name": "name", "results": [str(BT_DATA)]}
        metadata.add_result(res)

        with metrics as console, silence():
            console(metadata)