Example #1
def test_memory_distribution():
    # Test that we get an error for a missing unit
    contents = [{"category": {"metric": {"type": "memory_distribution"}}}]

    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)
    errors = list(all_metrics)
    assert len(errors) == 1
    assert ("`memory_distribution` is missing required parameter `memory_unit`"
            in errors[0])

    # Test that memory_distribution works
    contents = [{
        "category": {
            "metric": {
                "type": "memory_distribution",
                "memory_unit": "megabyte"
            }
        }
    }]

    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)
    errors = list(all_metrics)
    assert len(errors) == 0
    assert len(all_metrics.value) == 1
    all_metrics.value["category"][
        "metric"].memory_unit == metrics.MemoryUnit.megabyte
Example #2
def test_all_pings_reserved():
    # send_in_pings: [all_pings] is only allowed for internal metrics
    contents = [{
        "category": {
            "metric": {
                "type": "string",
                "send_in_pings": ["all_pings"]
            }
        }
    }]

    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)
    errors = list(all_metrics)
    assert len(errors) == 1
    assert "On instance category.metric" in errors[0]
    assert "Only internal metrics" in errors[0]

    all_metrics = parser.parse_objects(contents, {"allow_reserved": True})
    errors = list(all_metrics)
    assert len(errors) == 0

    # A custom ping called "all_pings" is not allowed
    contents = [{"all_pings": {"include_client_id": True}}]
    contents = [util.add_required_ping(x) for x in contents]
    all_pings = parser.parse_objects(contents)
    errors = list(all_pings)
    assert len(errors) == 1
    assert "is not allowed for 'all_pings'"
Example #3
def test_quantity():
    # Test that we get an error for a missing unit and gecko_datapoint
    contents = [{"category": {"metric": {"type": "quantity"}}}]

    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)
    errors = list(all_metrics)
    assert len(errors) == 2
    assert any("`quantity` is missing required parameter `unit`" in err
               for err in errors)
    assert any("is only allowed for Gecko metrics" in err for err in errors)

    # Test that quantity works
    contents = [{
        "category": {
            "metric": {
                "type": "quantity",
                "unit": "pixel",
                "gecko_datapoint": "FOO",
            }
        }
    }]

    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)
    errors = list(all_metrics)
    assert len(errors) == 0
    assert len(all_metrics.value) == 1
    all_metrics.value["category"]["metric"].unit == "pixel"
Example #4
def test_common_prefix():
    contents = [{
        "telemetry": {
            "network_latency": {
                "type": "quantity",
                "gecko_datapoint": "GC_NETWORK_LATENCY",
                "unit": "ms",
            },
            "network_bandwidth": {
                "type": "quantity",
                "gecko_datapoint": "GC_NETWORK_BANDWIDTH",
                "unit": "kbps",
            },
        }
    }]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 1
    assert nits[0][0] == "COMMON_PREFIX"

    # Now make sure the override works
    contents[0]["no_lint"] = ["COMMON_PREFIX"]
    all_metrics = parser.parse_objects(contents)
    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 0
Example #5
def test_geckoview_only_on_valid_metrics():
    for metric in [
            "timing_distribution",
            "custom_distributiuon",
            "memory_distribution",
    ]:
        contents = [{
            "category1": {
                "metric1": {
                    "type": metric,
                    "gecko_datapoint": "FOO"
                }
            }
        }]
        contents = [util.add_required(x) for x in contents]

        all_metrics = parser.parse_objects(contents)
        errs = list(all_metrics)

    contents = [{
        "category1": {
            "metric1": {
                "type": "event",
                "gecko_datapoint": "FOO"
            }
        }
    }]
    contents = [util.add_required(x) for x in contents]

    all_metrics = parser.parse_objects(contents)
    errs = list(all_metrics)
    assert len(errs) == 1
    assert "is only allowed for" in str(errs[0])
Example #6
def test_parser_reserved():
    contents = [{"glean.baseline": {"metric": {"type": "string"}}}]

    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)
    errors = list(all_metrics)
    assert len(errors) == 1
    assert "For category 'glean.baseline'" in errors[0]

    all_metrics = parser.parse_objects(contents, {"allow_reserved": True})
    errors = list(all_metrics)
    assert len(errors) == 0
Example #7
def test_invalid_names(location, name):
    contents = location(name)
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)
    errors = list(all_metrics)
    assert len(errors) == 1
    assert name in errors[0]
Example #8
def test_merge_metrics():
    """Merge multiple metrics.yaml files"""
    contents = [
        {
            "category1": {
                "metric1": {},
                "metric2": {}
            },
            "category2": {
                "metric3": {}
            }
        },
        {
            "category1": {
                "metric4": {}
            },
            "category3": {
                "metric5": {}
            }
        },
    ]
    contents = [util.add_required(x) for x in contents]

    all_metrics = parser.parse_objects(contents)
    list(all_metrics)
    all_metrics = all_metrics.value

    assert set(all_metrics["category1"].keys()) == set(
        ["metric1", "metric2", "metric4"])
    assert set(all_metrics["category2"].keys()) == set(["metric3"])
    assert set(all_metrics["category3"].keys()) == set(["metric5"])
Example #9
def test_user_lifetime_expiration():
    """Test that expiring 'user' lifetime metrics generate a warning."""
    contents = [{
        "user_data": {
            "counter": {
                "type": "counter",
                "lifetime": "user",
                "expires": "2100-01-01",
            },
            "string": {
                "type": "string",
                "lifetime": "user",
                "expires": "never"
            },
            "other": {
                "type": "string",
                "lifetime": "application"
            },
        }
    }]

    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 1
    assert set(["USER_LIFETIME_EXPIRATION"]) == set(v.check_name for v in nits)
Example #10
def test_fake_pings():
    """Another similarly-fragile test.
    It generates C++ for pings_test.yaml, comparing it byte-for-byte
    with an expected output C++ file `pings_test_output_cpp`.
    Expect it to be fragile.
    To generate a new expected output file, edit t/c/g/metrics_index.py,
    comment out all other ping yamls, and add one for
    t/c/g/pytest/pings_test.yaml. Run `mach build` (it'll fail). Copy
    objdir/t/c/g/GleanPings.h over pings_test_output_cpp.
    (Don't forget to undo your edits to t/c/g/metrics_index.py)
    """

    options = {"allow_reserved": False}
    input_files = [Path(path.join(path.dirname(__file__), "pings_test.yaml"))]

    all_objs = parser.parse_objects(input_files, options)
    assert not util.report_validation_errors(all_objs)
    assert not lint.lint_metrics(all_objs.value, options)

    output_fd = io.StringIO()
    cpp.output_cpp(all_objs.value, output_fd, options)

    with open(path.join(path.dirname(__file__), "pings_test_output_cpp"), "r") as file:
        EXPECTED_CPP = file.read()
    assert output_fd.getvalue() == EXPECTED_CPP
Example #11
def parse(args):
    """
    Parse and lint the input files,
    then return the parsed objects for further processing.
    """

    # Unfortunately, GeneratedFile appends `flags` directly after `inputs`
    # instead of listifying either, so we need to pull them out of *args ourselves.
    yaml_array = args[:-1]
    moz_app_version = args[-1]

    input_files = [Path(x) for x in yaml_array]

    # Derived heavily from glean_parser.translate.translate.
    # Adapted to how mozbuild sends us a fd, and to expire metrics on versions, not dates.

    options = get_parser_options(moz_app_version)

    # Lint the yaml first, then lint the metrics.
    if lint.lint_yaml_files(input_files, parser_config=options):
        # Warnings are Errors
        sys.exit(1)

    all_objs = parser.parse_objects(input_files, options)
    if util.report_validation_errors(all_objs):
        sys.exit(1)

    nits = lint.lint_metrics(all_objs.value, options)
    if nits is not None and any(nit.check_name != "EXPIRED" for nit in nits):
        # Treat Warnings as Errors in FOG.
        # But don't fail the whole build on expired metrics (it blocks testing).
        sys.exit(1)

    return all_objs.value, options
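
The *args packing described in the first comment is easiest to see at a call site. A hedged usage sketch, with placeholder paths and version string (mozbuild passes the YAML inputs first and MOZ_APP_VERSION as the final positional argument):

# Hypothetical invocation; paths and version are placeholders.
objects, options = parse([
    "toolkit/components/glean/metrics.yaml",
    "toolkit/components/glean/pings.yaml",
    "121.0a1",  # moz_app_version, always the last argument
])
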
Example #12
def test_gifft_codegen():
    """
    A regression test. Very fragile.
    It generates C++ for GIFFT for metrics_test.yaml and compares it
    byte-for-byte with expected output C++ files.
    To generate new expected output files, edit t/c/g/metrics_index.py,
    comment out the other metrics yamls, and add one for
    t/c/g/pytest/metrics_test.yaml. Run `mach build` (it'll fail). Copy
    objdir/t/c/g/XGIFFTMap.h to gifft_output_X.
    Don't forget to undo your edits to metrics_index.py after you're done.
    """

    options = {"allow_reserved": False}
    here_path = Path(path.dirname(__file__))
    input_files = [here_path / "metrics_test.yaml"]

    all_objs = parser.parse_objects(input_files, options)
    assert not util.report_validation_errors(all_objs)
    assert not lint.lint_metrics(all_objs.value, options)

    all_objs = all_objs.value
    for probe_type in ("Event", "Histogram", "Scalar"):
        output_fd = io.StringIO()
        cpp_fd = io.StringIO()
        run_glean_parser.output_gifft_map(output_fd, probe_type, all_objs,
                                          cpp_fd)

        with open(here_path / f"gifft_output_{probe_type}", "r") as file:
            EXPECTED = file.read()
        assert output_fd.getvalue() == EXPECTED

        if probe_type == "Event":
            with open(here_path / "gifft_output_EventExtra", "r") as file:
                EXPECTED = file.read()
            assert cpp_fd.getvalue() == EXPECTED
Example #13
def test_translate_expires():
    contents = [{
        "metrics": {
            "a": {
                "type": "counter",
                "expires": "never"
            },
            "b": {
                "type": "counter",
                "expires": "expired"
            },
            "c": {
                "type": "counter",
                "expires": "2000-01-01"
            },
            "d": {
                "type": "counter",
                "expires": "2100-01-01"
            },
        }
    }]
    contents = [util.add_required(x) for x in contents]

    objs = parser.parse_objects(contents)
    assert len(list(objs)) == 0
    objs = objs.value

    assert objs["metrics"]["a"].disabled is False
    assert objs["metrics"]["b"].disabled is True
    assert objs["metrics"]["c"].disabled is True
    assert objs["metrics"]["d"].disabled is False
Example #14
def test_combined():
    contents = [{
        "metrics": {
            "m_network_latency_ms": {
                "type": "timespan",
                "time_unit": "millisecond",
            },
            "m_memory_usage_mb": {
                "type": "memory_distribution",
                "memory_unit": "megabyte",
            },
            "m_width_pixels": {
                "type": "quantity",
                "gecko_datapoint": "WIDTH_PIXELS",
                "unit": "pixels",
            },
        }
    }]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 5
    assert set(["COMMON_PREFIX", "CATEGORY_GENERIC",
                "UNIT_IN_NAME"]) == set(v[0] for v in nits)
Example #15
    def parse(self, filenames, config, repo_url=None, commit_hash=None):
        config = config.copy()
        config["do_not_disable_expired"] = True

        paths = [Path(fname) for fname in filenames]
        paths = [path for path in paths if path.is_file()]
        results = parse_objects(paths, config)
        errors = [err for err in results]

        metrics = {
            metric.identifier(): metric.serialize()
            for category, probes in results.value.items()
            for probe_name, metric in probes.items()
        }

        for v in metrics.values():
            v["send_in_pings"] = [
                normalize_ping_name(p) for p in v["send_in_pings"]
            ]
            if repo_url and commit_hash:
                v["source_url"] = get_source_url(v["defined_in"], repo_url,
                                                 commit_hash)
            # the 'defined_in' structure is no longer needed
            del v["defined_in"]
        return metrics, errors
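
A hedged usage sketch of this method; the enclosing class isn't shown in the example, so GleanMetricsParser is an assumed name, and the path, URL, and hash are placeholders:

# GleanMetricsParser is an assumed name for the class this method belongs to.
metrics, errors = GleanMetricsParser().parse(
    ["metrics.yaml"],
    {"allow_reserved": False},
    repo_url="https://github.com/mozilla/example-repo",
    commit_hash="0123abcd",
)
# `metrics` maps each metric's identifier to its serialized dict, with
# send_in_pings normalized and defined_in replaced by a source_url.
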
Example #16
def parse(args):
    """
    Parse and lint the input files,
    then return the parsed objects for further processing.
    """

    # Unfortunately, GeneratedFile appends `flags` directly after `inputs`
    # instead of listifying either, so we need to pull them out of *args ourselves.
    yaml_array = args[:-1]
    moz_app_version = args[-1]

    input_files = [Path(x) for x in yaml_array]

    # Derived heavily from glean_parser.translate.translate.
    # Adapted to how mozbuild sends us a fd, and to expire metrics on versions, not dates.

    options = get_parser_options(moz_app_version)
    all_objs = parser.parse_objects(input_files, options)
    if util.report_validation_errors(all_objs):
        sys.exit(1)

    if lint.lint_metrics(all_objs.value, options):
        # Treat Warnings as Errors in FOG
        sys.exit(1)

    return all_objs.value, options
Example #17
def main(output_fd, metrics_index_path, which_array):

    # Source the list of input files from `metrics_index.py`
    sys.path.append(str(Path(metrics_index_path).parent))
    from metrics_index import METRICS, PINGS
    if which_array == 'METRICS':
        input_files = METRICS
    elif which_array == 'PINGS':
        input_files = PINGS
    else:
        print("Build system's asking for unknown array {}".format(which_array))
        sys.exit(1)

    # Derived heavily from glean_parser.translate.translate.
    # Adapted to how mozbuild sends us a fd.
    options = {"allow_reserved": False}
    input_files = [Path(x) for x in input_files]

    all_objs = parser.parse_objects(input_files, options)
    if util.report_validation_errors(all_objs):
        sys.exit(1)

    if lint.lint_metrics(all_objs.value, options):
        # Treat Warnings as Errors in FOG
        sys.exit(1)

    rust.output_rust(all_objs.value, output_fd, options)
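
main() sources its input lists by importing METRICS and PINGS from metrics_index.py. A minimal sketch of the shape that file must have, with placeholder paths:

# metrics_index.py (sketch): two plain lists of yaml paths.
METRICS = [
    "toolkit/components/glean/metrics.yaml",
]
PINGS = [
    "toolkit/components/glean/pings.yaml",
]
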
Example #18
def test_snake_case_ping_names():
    content = {"camelCasePingName": {"include_client_id": True}}

    util.add_required_ping(content)
    errors = list(parser.parse_objects([content]))
    assert len(errors) == 1
    assert "camelCasePingName" in errors[0]
Example #19
def test_all_metric_types():
    """ Honestly, this is a pretty bad test.
        It generates Rust for a given test metrics.yaml and compares it byte-for-byte
        with an expected output Rust file.
        Expect it to be fragile.
        To generate a new expected output file, copy the test yaml over the one in t/c/g,
        run mach build, then copy the rust output from objdir/t/c/g/api/src/.
    """

    options = {"allow_reserved": False}
    input_files = [
        Path(path.join(path.dirname(__file__), "metrics_test.yaml"))
    ]

    all_objs = parser.parse_objects(input_files, options)
    assert not util.report_validation_errors(all_objs)
    assert not lint.lint_metrics(all_objs.value, options)

    output_fd = io.StringIO()
    rust.output_rust(all_objs.value, output_fd, options)

    with open(path.join(path.dirname(__file__), "metrics_test_output"),
              'r') as file:
        EXPECTED_RUST = file.read()
    assert output_fd.getvalue() == EXPECTED_RUST
Example #20
def test_send_in_pings_restrictions():
    """Test that invalid ping names are disallowed in `send_in_pings`."""
    all_metrics = parser.parse_objects(ROOT / "data" /
                                       "invalid-ping-names.yaml")
    errors = list(all_metrics)
    assert len(errors) == 1
    assert "'invalid_ping_name' does not match" in errors[0]
Example #21
def test_misspelling_pings():
    contents = [{
        "user_data": {
            "counter": {
                "type": "counter",
                "send_in_pings": ["metric"]
            },
            "string": {
                "type": "string",
                "send_in_pings": ["event"]
            },
            "string2": {
                "type": "string",
                "send_in_pings": ["metrics", "events"]
            },
        }
    }]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 2
    assert set(["MISSPELLED_PING"]) == set(v.check_name for v in nits)
Example #22
def test_baseline_restriction():
    contents = [{
        "user_data": {
            "counter": {
                "type": "counter",
                "send_in_pings": ["baseline"]
            },
            "string": {
                "type": "string",
                "send_in_pings": ["metrics", "baseline"]
            },
            "string2": {
                "type": "string",
                "send_in_pings": ["metrics"]
            },
        }
    }]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 2
    assert set(["BASELINE_PING"]) == set(v.check_name for v in nits)
Example #23
def test_multiple_errors():
    """Make sure that if there are multiple errors, we get all of them."""
    contents = [{"camelCaseName": {"metric": {"type": "unknown"}}}]

    contents = [util.add_required(x) for x in contents]
    metrics = parser.parse_objects(contents)
    errors = list(metrics)
    assert len(errors) == 2
Example #24
def test_timing_distribution_unit_default():
    contents = [{"category1": {"metric1": {"type": "timing_distribution"}}}]
    contents = [util.add_required(x) for x in contents]

    all_metrics = parser.parse_objects(contents)
    errs = list(all_metrics)
    assert len(errs) == 0
    assert (all_metrics.value["category1"]["metric1"].time_unit ==
            metrics.TimeUnit.nanosecond)
Example #25
def test_merge_metrics_clash():
    """Merge multiple metrics.yaml files with conflicting metric names."""
    contents = [{"category1": {"metric1": {}}}, {"category1": {"metric1": {}}}]
    contents = [util.add_required(x) for x in contents]

    all_metrics = parser.parse_objects(contents)
    errors = list(all_metrics)
    assert len(errors) == 1
    assert "Duplicate metric name" in errors[0]
Example #26
def test_required_denominator():
    """denominator is required on use_counter"""
    contents = [{"category": {"metric": {"type": "use_counter"}}}]

    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)
    errors = list(all_metrics)
    assert len(errors) == 1
    assert "denominator is required" in errors[0]
Example #27
def test_reserved_metrics_category():
    """
    The category "pings" can't be used by metrics -- it's reserved for pings.
    """
    content = {"pings": {"metric": {"type": "string"}}}

    util.add_required(content)
    errors = list(parser.parse_objects(content))
    assert len(errors) == 1
    assert "reserved as a category name" in errors[0]
Example #28
    def parse(self, filenames, config):
        config = config.copy()
        paths = [Path(fname) for fname in filenames]
        results = parse_objects(paths, config)
        errors = [err for err in results]

        return ({
            normalize_ping_name(ping_name): ping_data.serialize()
            for category, pings in results.value.items()
            for ping_name, ping_data in pings.items()
        }, errors)
Example #29
def test_duplicate_send_in_pings():
    """Test the basics of parsing a single file."""
    all_metrics = parser.parse_objects(
        [ROOT / "data" / "duplicate_send_in_ping.yaml"],
        config={"allow_reserved": True})

    errs = list(all_metrics)
    assert len(errs) == 0

    metric = all_metrics.value["telemetry"]["test"]
    assert metric.send_in_pings == ["core", "metrics"]
Example #30
def test_category_generic():
    contents = [{"metrics": {"measurement": {"type": "boolean"}}}]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)

    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 1
    assert nits[0][0] == "CATEGORY_GENERIC"

    contents[0]["no_lint"] = ["CATEGORY_GENERIC"]
    all_metrics = parser.parse_objects(contents)
    errs = list(all_metrics)
    assert len(errs) == 0

    nits = lint.lint_metrics(all_metrics.value)

    assert len(nits) == 0