Example #1
def test_perfherder_metrics_filtering():
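    # A metrics filter that matches nothing should prevent the perfherder
    # layer from producing an output file.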
    options = {
        "perfherder": True,
        "perfherder-prefix": "",
        "perfherder-metrics": [metric_fields("I shouldn't match a metric")],
    }

    metrics, metadata, env = setup_env(options)
    metadata.clear_results()

    with temp_dir() as tmpdir:
        nodatajson = pathlib.Path(tmpdir, "nodata.json")
        with nodatajson.open("w") as f:
            json.dump({}, f)

        metadata.add_result({
            "results": str(nodatajson),
            "name": "browsertime"
        })

        with temp_dir() as output:
            env.set_arg("output", output)
            with metrics as m, silence():
                m(metadata)

            assert not pathlib.Path(output, "perfherder-data.json").exists()
Example #2
def test_android_log(*mocked):
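    # Run the system and test layers with androidlog enabled, then check
    # that the processed logcat output file is produced.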
    with temp_file() as logcat, temp_dir() as output:
        args = {
            "flavor": "mobile-browser",
            "android-install-apk": ["this.apk"],
            "android": True,
            "console": True,
            "android-timeout": 30,
            "android-capture-adb": "stdout",
            "android-capture-logcat": logcat,
            "android-app-name": "org.mozilla.fenix",
            "androidlog": True,
            "output": output,
        }

        mach_cmd, metadata, env = get_running_env(**args)
        env.set_arg("tests", [EXAMPLE_TEST])

        with env.layers[SYSTEM] as sys, env.layers[TEST] as andro:
            metadata = andro(sys(metadata))

        # we want to drop the first result
        metadata._results = metadata._results[1:]
        with env.layers[METRICS] as metrics:
            metadata = metrics(metadata)

        assert pathlib.Path(output, "LogCatstd-output.json").exists()
Example #3
def test_influx_service(on_try):
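    # Exercise the Influx layer end to end against mocked HTTP endpoints
    # and verify that the rumspeedindex metric was sent.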
    if ON_TRY and sys.platform == "darwin":
        # macOS workers in CI are restricted
        return

    mocks()
    with running_on_try(on_try), temp_dir() as output:
        args = {
            "verbose": True,
            "output": output,
            "perfboard-influx-password": "******",
            "perfboard-grafana-key": "xxx",
            "perfboard-grafana-host": "grafana",
            "perfboard-influx-port": 0,
            "perfboard-influx-host": "influxdb",
            "tests": [EXAMPLE_TEST],
        }

        mach_cmd, metadata, env = get_running_env(**args)
        metadata.add_result({"results": str(BT_DATA), "name": "browsertime"})
        layer = Influx(env, mach_cmd)
        layer.setup()
        try:
            metadata = layer.run(metadata)
        finally:
            layer.teardown()

    index = 2 if on_try else 1
    sent_data = responses.calls[index].request.body.split(b"\n")
    fields = [line.split(b",")[0].strip() for line in sent_data]
    assert b"rumspeedindex" in fields

    responses.reset()
Example #4
    def run(self, metadata):
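        # Run a single xpcshell test for the requested number of cycles
        # and aggregate the collected metrics into one result entry.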
        tests = self.get_arg("tests", [])
        if len(tests) != 1:
            # for now we only support a single test
            raise NotImplementedError(str(tests))

        test = Path(tests[0])
        if not test.exists():
            raise FileNotFoundError(str(test))

        # let's grab the manifest
        manifest = Path(test.parent, "xpcshell.ini")
        if not manifest.exists():
            raise FileNotFoundError(str(manifest))

        import runxpcshelltests

        xpcshell = runxpcshelltests.XPCShellTests(log=self)
        kwargs = {}
        kwargs["testPaths"] = test.name
        kwargs["verbose"] = True
        kwargs["xpcshell"] = self.mach_cmd.get_binary_path("xpcshell")
        kwargs["mozInfo"] = str(Path(self.topobjdir, "mozinfo.json"))
        kwargs["symbolsPath"] = str(Path(self.distdir, "crashreporter-symbols"))
        kwargs["logfiles"] = True
        kwargs["profileName"] = "firefox"
        kwargs["pluginsPath"] = str(Path(self.distdir, "plugins"))
        kwargs["testingModulesDir"] = str(Path(self.topobjdir, "_tests/modules"))
        kwargs["utility_path"] = self.bindir
        kwargs["manifest"] = manifest
        kwargs["totalChunks"] = 1
        cycles = self.get_arg("cycles", 1)
        self.info("Running %d cycles" % cycles)

        for cycle in range(cycles):
            with temp_dir() as tmp, silence():
                kwargs["tempDir"] = tmp
                if not xpcshell.runTests(kwargs):
                    raise XPCShellTestError()
            self.info("Cycle %d" % (cycle + 1))
        self.info("tests done.")

        results = defaultdict(list)
        for m in self.metrics:
            for key, val in m.items():
                results[key].append(val)

        metadata.add_result(
            {
                "name": test.name,
                "framework": {"name": "xpcshell"},
                "transformer": "mozperftest.test.xpcshell:XPCShellData",
                "results": [
                    {"values": measures, "name": subtest}
                    for subtest, measures in results.items()
                ],
            }
        )

        return metadata
Example #5
def test_perfherder_validation_failure():
    options = {"perfherder": True, "perfherder-prefix": ""}

    metrics, metadata, env = setup_env(options)

    # Perfherder schema has limits on min/max data values. Having
    # no metrics in the options will cause a failure because of the
    # timestamps that are picked up from browsertime.
    with pytest.raises(jsonschema.ValidationError):
        with temp_dir() as output:
            env.set_arg("output", output)
            with metrics as m, silence():
                m(metadata)
Example #6
def test_perfherder_missing_data_failure():
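    # A results file without any usable data should raise
    # PerfherderValidDataError.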
    options = {"perfherder": True, "perfherder-prefix": ""}

    metrics, metadata, env = setup_env(options)
    metadata.clear_results()

    with temp_dir() as tmpdir:
        nodatajson = pathlib.Path(tmpdir, "baddata.json")
        with nodatajson.open("w") as f:
            json.dump({"bad data": "here"}, f)

        metadata.add_result({"results": str(nodatajson), "name": "browsertime"})

        with pytest.raises(PerfherderValidDataError):
            with temp_file() as output:
                env.set_arg("output", output)
                with metrics as m, silence():
                    m(metadata)
Example #7
def test_ping_server():
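    # Start the telemetry ping server, submit a ping to it, and check
    # that the ping ends up in telemetry.json after teardown.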
    if ON_TRY and sys.platform == "darwin":
        # macOS workers in CI are restricted
        return
    ping_data = {"some": "data"}
    with temp_dir() as output:
        args = {"verbose": True, "output": output}
        mach_cmd, metadata, env = get_running_env(**args)
        layer = PingServer(env, mach_cmd)
        layer.setup()
        try:
            metadata = layer.run(metadata)
            # simulates a ping
            requests.post(
                layer.endpoint + "/submit/something", data=json.dumps(ping_data)
            )
        finally:
            layer.teardown()

        with Path(output, "telemetry.json").open() as f:
            assert json.loads(f.read()) == [ping_data]
Example #8
def test_console_output(*mocked):
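    # Run the console layer with a stubbed-out run_process so the test
    # consumes the metadata without spawning any real process.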
    with temp_dir() as tempdir:
        options = {
            "console-prefix": "",
            "console": True,
            "output": tempdir,
        }
        mach_cmd, metadata, env = get_running_env(**options)
        runs = []

        def _run_process(*args, **kw):
            runs.append((args, kw))

        mach_cmd.run_process = _run_process
        metrics = env.layers[METRICS]
        env.set_arg("tests", [EXAMPLE_TEST])
        res = {"name": "name", "results": [str(BT_DATA)]}
        metadata.add_result(res)

        with metrics as console, silence():
            console(metadata)
Example #9
def files(data):
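    # Fixture: materialize the given data as files in a temporary
    # directory and yield their paths to the test.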
    # Create a temporary directory; keeping the context open for the
    # whole fixture means everything under it is cleaned up after the
    # test finishes (the original exited the context here, deleting the
    # directory before it was used).
    with temp_dir() as td:
        tmp_path = pathlib.Path(td)

        dirs = {
            "resources": tmp_path / "resources",
            "output": tmp_path / "output",
        }

        for d in dirs.values():
            d.mkdir(parents=True, exist_ok=True)

        # Create temporary data files for tests.
        def _create_temp_files(path, data):
            path.touch(exist_ok=True)
            path.write_text(data)
            return path.resolve().as_posix()

        resources = {}
        json_1 = dirs["resources"] / "file_1.json"
        resources["file_1"] = _create_temp_files(json_1, json.dumps(data["data_1"]))

        json_2 = dirs["resources"] / "file_2.json"
        resources["file_2"] = _create_temp_files(json_2, json.dumps(data["data_2"]))

        txt_3 = dirs["resources"] / "file_3.txt"
        resources["file_3"] = _create_temp_files(txt_3, str(data["data_3"]))

        output = dirs["output"] / "output.json"

        yield {
            "resources": resources,
            "dirs": dirs,
            "output": output,
        }
Example #10
    def run(self, metadata):
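        # Run a single xpcshell test, resolving the xpcshell binary,
        # mozinfo, plugins and modules locations before launching the
        # runner, then aggregate the collected metrics.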
        tests = self.get_arg("tests", [])
        if len(tests) != 1:
            # for now we only support a single test
            raise NotImplementedError(str(tests))

        test = Path(tests[0])
        if not test.exists():
            raise FileNotFoundError(str(test))

        # let's grab the manifest
        manifest = Path(test.parent, "xpcshell.ini")
        if not manifest.exists():
            raise FileNotFoundError(str(manifest))

        nodejs = self.get_arg("nodejs")
        if nodejs is not None:
            os.environ["MOZ_NODE_PATH"] = nodejs

        import runxpcshelltests

        verbose = self.get_arg("verbose")
        xpcshell = runxpcshelltests.XPCShellTests(log=self)
        kwargs = {}
        kwargs["testPaths"] = test.name
        kwargs["verbose"] = verbose
        binary = self.get_arg("binary")
        if binary is None:
            binary = self.mach_cmd.get_binary_path("xpcshell")
        kwargs["xpcshell"] = binary
        binary = Path(binary)
        mozinfo = self.get_arg("mozinfo")
        if mozinfo is None:
            mozinfo = binary.parent / ".." / "mozinfo.json"
            if not mozinfo.exists():
                mozinfo = Path(self.topobjdir, "mozinfo.json")
        else:
            mozinfo = Path(mozinfo)

        kwargs["mozInfo"] = str(mozinfo)
        kwargs["symbolsPath"] = str(Path(self.distdir,
                                         "crashreporter-symbols"))
        kwargs["logfiles"] = True
        kwargs["profileName"] = "firefox"
        plugins = binary.parent / "plugins"
        if not plugins.exists():
            plugins = Path(self.distdir, "plugins")
        kwargs["pluginsPath"] = str(plugins)
        modules = binary.parent / "modules"
        if not modules.exists():
            modules = Path(self.topobjdir, "_tests", "modules")
        kwargs["testingModulesDir"] = str(modules)
        kwargs["utility_path"] = self.bindir
        kwargs["manifest"] = str(manifest)
        kwargs["totalChunks"] = 1
        xre_path = self.get_arg("xre-path")
        if xre_path is not None:
            self.info(f"Copying {xre_path} elements to {binary.parent}")
            copy_tree(xre_path, str(binary.parent), update=True)

        http3server = binary.parent / "http3server"
        if http3server.exists():
            kwargs["http3server"] = str(http3server)

        cycles = self.get_arg("cycles", 1)
        self.info("Running %d cycles" % cycles)

        for cycle in range(cycles):
            self.info("Cycle %d" % (cycle + 1))
            with temp_dir() as tmp:
                kwargs["tempDir"] = tmp
                if not xpcshell.runTests(kwargs):
                    raise XPCShellTestError()

        self.info("tests done.")

        results = defaultdict(list)
        for m in self.metrics:
            for key, val in m.items():
                results[key].append(val)

        metadata.add_result(
            {
                "name": test.name,
                "framework": {"name": "mozperftest"},
                "transformer": "mozperftest.test.xpcshell:XPCShellData",
                "results": [
                    {"values": measures, "name": subtest}
                    for subtest, measures in results.items()
                ],
            }
        )

        return metadata