Example #1
def test_common_prefix():
    contents = [{
        "telemetry": {
            "network_latency": {
                "type": "quantity",
                "gecko_datapoint": "GC_NETWORK_LATENCY",
                "unit": "ms",
            },
            "network_bandwidth": {
                "type": "quantity",
                "gecko_datapoint": "GC_NETWORK_BANDWIDTH",
                "unit": "kbps",
            },
        }
    }]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 1
    assert nits[0][0] == "COMMON_PREFIX"

    # Now make sure the override works
    contents[0]["no_lint"] = ["COMMON_PREFIX"]
    all_metrics = parser.parse_objects(contents)
    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 0
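
These examples all funnel their dicts through `util.add_required`, a test helper that fills in the metadata the metrics.yaml schema requires. A plausible minimal sketch of such a helper (the field values and exact behavior are assumptions, not the helper's actual source):

def add_required(contents):
    # Sketch (assumed): fill in schema-required fields so parsing succeeds.
    contents = dict(contents)
    contents["$schema"] = "moz://mozilla.org/schemas/glean/metrics/2-0-0"
    for category, metrics in contents.items():
        if category.startswith("$") or category == "no_lint":
            continue
        for metric in metrics.values():
            metric.setdefault("bugs", ["https://bugzilla.mozilla.org/123456"])
            metric.setdefault("data_reviews", ["http://example.com/review"])
            metric.setdefault("description", "DESCRIPTION")
            metric.setdefault("notification_emails", ["nobody@example.com"])
            metric.setdefault("expires", "never")
    return contents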
Example #2
def test_user_lifetime_expiration():
    """Test that expiring 'user' lifetime metrics generate a warning."""
    contents = [{
        "user_data": {
            "counter": {
                "type": "counter",
                "lifetime": "user",
                "expires": "2100-01-01",
            },
            "string": {
                "type": "string",
                "lifetime": "user",
                "expires": "never"
            },
            "other": {
                "type": "string",
                "lifetime": "application"
            },
        }
    }]

    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 1
    assert set(["USER_LIFETIME_EXPIRATION"]) == set(v.check_name for v in nits)
Example #3
def test_combined():
    contents = [{
        "metrics": {
            "m_network_latency_ms": {
                "type": "timespan",
                "time_unit": "millisecond",
            },
            "m_memory_usage_mb": {
                "type": "memory_distribution",
                "memory_unit": "megabyte",
            },
            "m_width_pixels": {
                "type": "quantity",
                "gecko_datapoint": "WIDTH_PIXELS",
                "unit": "pixels",
            },
        }
    }]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

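    # 5 nits: 1 COMMON_PREFIX (shared "m_" prefix) + 1 CATEGORY_GENERIC + 3 UNIT_IN_NAME (one per metric).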
    assert len(nits) == 5
    assert set(["COMMON_PREFIX", "CATEGORY_GENERIC",
                "UNIT_IN_NAME"]) == set(v[0] for v in nits)
Example #4
def test_baseline_restriction():
    contents = [{
        "user_data": {
            "counter": {
                "type": "counter",
                "send_in_pings": ["baseline"]
            },
            "string": {
                "type": "string",
                "send_in_pings": ["metrics", "baseline"]
            },
            "string2": {
                "type": "string",
                "send_in_pings": ["metrics"]
            },
        }
    }]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

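    # "counter" and "string" both send in the reserved "baseline" ping; "string2" does not.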
    assert len(nits) == 2
    assert set(["BASELINE_PING"]) == set(v.check_name for v in nits)
Example #5
def test_gifft_codegen():
    """
    A regression test. Very fragile.
    It generates C++ for GIFFT for metrics_test.yaml and compares it
    byte-for-byte with expected output C++ files.
    To generate new expected output files, edit t/c/g/metrics_index.py,
    comment out the other metrics yamls, and add one for
    t/c/g/pytest/metrics_test.yaml. Run `mach build` (it'll fail). Copy
    objdir/t/c/g/XGIFFTMap.h to gifft_output_X.
    Don't forget to undo your edits to metrics_index.py after you're done.
    """

    options = {"allow_reserved": False}
    here_path = Path(path.dirname(__file__))
    input_files = [here_path / "metrics_test.yaml"]

    all_objs = parser.parse_objects(input_files, options)
    assert not util.report_validation_errors(all_objs)
    assert not lint.lint_metrics(all_objs.value, options)

    all_objs = all_objs.value
    for probe_type in ("Event", "Histogram", "Scalar"):
        output_fd = io.StringIO()
        cpp_fd = io.StringIO()
        run_glean_parser.output_gifft_map(output_fd, probe_type, all_objs,
                                          cpp_fd)

        with open(here_path / f"gifft_output_{probe_type}", "r") as file:
            EXPECTED = file.read()
        assert output_fd.getvalue() == EXPECTED

        if probe_type == "Event":
            with open(here_path / "gifft_output_EventExtra", "r") as file:
                EXPECTED = file.read()
            assert cpp_fd.getvalue() == EXPECTED
Example #6
def test_fake_pings():
    """Another similarly-fragile test.
    It generates C++ for pings_test.yaml, comparing it byte-for-byte
    with an expected output C++ file `pings_test_output_cpp`.
    Expect it to be fragile.
    To generate a new expected output file, edit t/c/g/metrics_index.py,
    comment out all other ping yamls, and add one for
    t/c/g/pytest/pings_test.yaml. Run `mach build` (it'll fail). Copy
    objdir/t/c/g/GleanPings.h over pings_test_output_cpp.
    (Don't forget to undo your edits to t/c/g/metrics_index.py)
    """

    options = {"allow_reserved": False}
    input_files = [Path(path.join(path.dirname(__file__), "pings_test.yaml"))]

    all_objs = parser.parse_objects(input_files, options)
    assert not util.report_validation_errors(all_objs)
    assert not lint.lint_metrics(all_objs.value, options)

    output_fd = io.StringIO()
    cpp.output_cpp(all_objs.value, output_fd, options)

    with open(path.join(path.dirname(__file__), "pings_test_output_cpp"), "r") as file:
        EXPECTED_CPP = file.read()
    assert output_fd.getvalue() == EXPECTED_CPP
Example #7
def test_misspelling_pings():
    contents = [{
        "user_data": {
            "counter": {
                "type": "counter",
                "send_in_pings": ["metric"]
            },
            "string": {
                "type": "string",
                "send_in_pings": ["event"]
            },
            "string2": {
                "type": "string",
                "send_in_pings": ["metrics", "events"]
            },
        }
    }]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

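    # "metric" and "event" look like misspellings of the builtin "metrics" and "events" pings.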
    assert len(nits) == 2
    assert set(["MISSPELLED_PING"]) == set(v.check_name for v in nits)
def test_all_metric_types():
    """ Honestly, this is a pretty bad test.
        It generates Rust for a given test metrics.yaml and compares it byte-for-byte
        with an expected output Rust file.
        Expect it to be fragile.
        To generate a new expected output file, copy the test yaml over the one in t/c/g,
        run mach build, then copy the rust output from objdir/t/c/g/api/src/.
    """

    options = {"allow_reserved": False}
    input_files = [
        Path(path.join(path.dirname(__file__), "metrics_test.yaml"))
    ]

    all_objs = parser.parse_objects(input_files, options)
    assert not util.report_validation_errors(all_objs)
    assert not lint.lint_metrics(all_objs.value, options)

    output_fd = io.StringIO()
    rust.output_rust(all_objs.value, output_fd, options)

    with open(path.join(path.dirname(__file__), "metrics_test_output"),
              'r') as file:
        EXPECTED_RUST = file.read()
    assert output_fd.getvalue() == EXPECTED_RUST
Example #9
def parse(args):
    """
    Parse and lint the input files,
    then return the parsed objects for further processing.
    """

    # Unfortunately, GeneratedFile appends `flags` directly after `inputs`
    # instead of listifying either, so we have to pull the pieces back out of *args.
    yaml_array = args[:-1]
    moz_app_version = args[-1]

    input_files = [Path(x) for x in yaml_array]

    # Derived heavily from glean_parser.translate.translate.
    # Adapted to how mozbuild sends us a fd, and to expire on versions, not dates.

    options = get_parser_options(moz_app_version)
    all_objs = parser.parse_objects(input_files, options)
    if util.report_validation_errors(all_objs):
        sys.exit(1)

    if lint.lint_metrics(all_objs.value, options):
        # Treat Warnings as Errors in FOG
        sys.exit(1)

    return all_objs.value, options
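
A hypothetical invocation (paths and version invented for illustration), mirroring how the build system passes the yaml paths followed by the app version as the final argument:

objs, options = parse(["metrics.yaml", "pings.yaml", "121.0"])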
Example #10
def main(output_fd, metrics_index_path, which_array):

    # Source the list of input files from `metrics_index.py`
    sys.path.append(str(Path(metrics_index_path).parent))
    from metrics_index import METRICS, PINGS
    if which_array == 'METRICS':
        input_files = METRICS
    elif which_array == 'PINGS':
        input_files = PINGS
    else:
        print("Build system's asking for unknown array {}".format(which_array))
        sys.exit(1)

    # Derived heavily from glean_parser.translate.translate.
    # Adapted to how mozbuild sends us a fd.
    options = {"allow_reserved": False}
    input_files = [Path(x) for x in input_files]

    all_objs = parser.parse_objects(input_files, options)
    if util.report_validation_errors(all_objs):
        sys.exit(1)

    if lint.lint_metrics(all_objs.value, options):
        # Treat Warnings as Errors in FOG
        sys.exit(1)

    rust.output_rust(all_objs.value, output_fd, options)
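
`metrics_index.py` is expected to define `METRICS` and `PINGS` as lists of yaml paths. A minimal sketch (the paths are hypothetical):

# metrics_index.py (sketch)
METRICS = ["toolkit/components/glean/metrics.yaml"]
PINGS = ["toolkit/components/glean/pings.yaml"]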
Example #11
def parse(args):
    """
    Parse and lint the input files,
    then return the parsed objects for further processing.
    """

    # Unfortunately, GeneratedFile appends `flags` directly after `inputs`
    # instead of listifying either, so we have to pull the pieces back out of *args.
    yaml_array = args[:-1]
    moz_app_version = args[-1]

    input_files = [Path(x) for x in yaml_array]

    # Derived heavily from glean_parser.translate.translate.
    # Adapted to how mozbuild sends us a fd, and to expire on versions, not dates.

    options = get_parser_options(moz_app_version)

    # Lint the yaml first, then lint the metrics.
    if lint.lint_yaml_files(input_files, parser_config=options):
        # Warnings are Errors
        sys.exit(1)

    all_objs = parser.parse_objects(input_files, options)
    if util.report_validation_errors(all_objs):
        sys.exit(1)

    nits = lint.lint_metrics(all_objs.value, options)
    if nits is not None and any(nit.check_name != "EXPIRED" for nit in nits):
        # Treat Warnings as Errors in FOG.
        # But don't fail the whole build on expired metrics (it blocks testing).
        sys.exit(1)

    return all_objs.value, options
Example #12
def test_category_generic():
    contents = [{"metrics": {"measurement": {"type": "boolean"}}}]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 1
    assert nits[0][0] == "CATEGORY_GENERIC"

    contents[0]["no_lint"] = ["CATEGORY_GENERIC"]
    all_metrics = parser.parse_objects(contents)
    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 0
Example #13
def test_unit_in_name():
    contents = [{
        "telemetry": {
            "network_latency_ms": {
                "type": "timespan",
                "time_unit": "millisecond"
            },
            "memory_usage_mb": {
                "type": "memory_distribution",
                "memory_unit": "megabyte",
            },
            "width_pixels": {
                "type": "quantity",
                "gecko_datapoint": "WIDTH_PIXELS",
                "unit": "pixels",
            },
        }
    }]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 3
    assert all(nit[0] == "UNIT_IN_NAME" for nit in nits)

    # Now make sure the override works
    contents[0]["telemetry"]["network_latency_ms"]["no_lint"] = [
        "UNIT_IN_NAME"
    ]
    all_metrics = parser.parse_objects(contents)
    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 2
Example #14
def test_no_metrics_expired():
    """
    Of all the metrics included in this build, are any expired?
    If so, they must be removed or renewed.

    (This also checks other lints, as a treat.)
    """
    with open("browser/config/version.txt", "r") as version_file:
        app_version = version_file.read().strip()

    options = run_glean_parser.get_parser_options(app_version)
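    # metrics_yamls is presumably defined at module scope (e.g. sourced from
    # the metrics index); it is not shown in this excerpt.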
    metrics_paths = [Path(x) for x in metrics_yamls]
    all_objs = parser.parse_objects(metrics_paths, options)
    assert not util.report_validation_errors(all_objs)
    assert not lint.lint_metrics(all_objs.value, options)
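Example #15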
def main(output_fd, *metrics_yamls):
    # Derived heavily from glean_parser.translate.translate.
    # Adapted to how mozbuild sends us a fd.
    options = {"allow_reserved": False}
    input_files = [Path(x) for x in metrics_yamls]

    all_objs = parser.parse_objects(input_files, options)
    if util.report_validation_errors(all_objs):
        sys.exit(1)

    if lint.lint_metrics(all_objs.value, options):
        # Treat Warnings as Errors in FOG
        sys.exit(1)

    rust.output_rust(all_objs.value, output_fd, options)
Example #16
def parse_with_options(input_files, options):
    # Derived heavily from glean_parser.translate.translate.
    # Adapted to how mozbuild sends us a fd, and to expire on versions, not dates.

    # Lint the yaml first, then lint the metrics.
    if lint.lint_yaml_files(input_files, parser_config=options):
        # Warnings are Errors
        raise ParserError("linter found problems")

    all_objs = parser.parse_objects(input_files, options)
    if util.report_validation_errors(all_objs):
        raise ParserError("found validation errors during parse")

    nits = lint.lint_metrics(all_objs.value, options)
    if nits is not None and any(nit.check_name != "EXPIRED" for nit in nits):
        # Treat Warnings as Errors in FOG.
        # But don't fail the whole build on expired metrics (it blocks testing).
        raise ParserError("glinter nits found during parse")

    objects = all_objs.value

    # bug 1720494: This should be a simple call to translate.transform
    counters = {}
    numerators_by_denominator: Dict[str, Any] = {}
    for category_val in objects.values():
        for metric in category_val.values():
            fqmn = metric.identifier()
            if getattr(metric, "type", None) == "counter":
                counters[fqmn] = metric
            denominator_name = getattr(metric, "denominator_metric", None)
            if denominator_name:
                metric.type = "numerator"
                numerators_by_denominator.setdefault(denominator_name, [])
                numerators_by_denominator[denominator_name].append(metric)

    for denominator_name, numerators in numerators_by_denominator.items():
        if denominator_name not in counters:
            numerator_names = [m.identifier() for m in numerators]
            print(
                f"No `counter` named {denominator_name} found to be used as"
                f" denominator for {numerator_names}",
                file=sys.stderr,
            )
            raise ParserError("rate couldn't find denominator")
        counters[denominator_name].type = "denominator"
        counters[denominator_name].numerators = numerators

    return objects, options
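
To see what the counter/denominator rewrite above does, consider a hypothetical pair of metrics (names invented; real ones are glean_parser metric objects):

from types import SimpleNamespace

clicks = SimpleNamespace(type="counter", identifier=lambda: "category.clicks")
click_rate = SimpleNamespace(type="rate", denominator_metric="category.clicks",
                             identifier=lambda: "category.click_rate")
objects = {"category": {"clicks": clicks, "click_rate": click_rate}}

# After the transform loop runs over `objects`:
#   click_rate.type == "numerator"
#   clicks.type == "denominator"
#   clicks.numerators == [click_rate]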
Example #17
def test_superfluous():
    contents = [{
        "telemetry": {
            "network_latency": {
                "type": "timespan",
                "time_unit": "millisecond",
                "no_lint": ["UNIT_IN_NAME"],
            }
        }
    }]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

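    # The metric's name contains no unit, so suppressing UNIT_IN_NAME is superfluous.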
    assert len(nits) == 1
    assert all(nit[0] == "SUPERFLUOUS_NO_LINT" for nit in nits)
    assert all("UNIT_IN_NAME" in nit[2] for nit in nits)
def test_expires_version():
    """This test relies on the intermediary object format output by glean_parser.
    Expect it to be fragile on glean_parser updates that change that format.
    """

    # The test file has 41, 42, 100. Use 42.0a1 here to ensure "expires == version" means expired.
    options = run_glean_parser.get_parser_options("42.0a1")
    input_files = [
        Path(
            path.join(path.dirname(__file__),
                      "metrics_expires_versions_test.yaml"))
    ]

    all_objs = parser.parse_objects(input_files, options)

    assert not util.report_validation_errors(all_objs)
    assert not lint.lint_metrics(all_objs.value, options)

    assert all_objs.value["test"]["expired1"].disabled is True
    assert all_objs.value["test"]["expired2"].disabled is True
    assert all_objs.value["test"]["unexpired"].disabled is False
Example #19
def test_warnings():
    # SUPERFLUOUS_NO_LINT is a warning, so it shouldn't return an error code
    contents = [{
        "user_data": {
            "counter": {
                "type": "counter",
                "send_in_pings": ["metrics"],
                "no_lint": ["UNIT_IN_NAME"],
            },
        }
    }]

    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert not any(x.check_type == lint.CheckType.error for x in nits)
    assert len(nits) == 1
    assert nits[0].check_name == "SUPERFLUOUS_NO_LINT"