def test_memory_distribution():
    """`memory_distribution` requires `memory_unit` and stores it on the metric."""
    # A memory_distribution without memory_unit must be reported as an error.
    contents = [{"category": {"metric": {"type": "memory_distribution"}}}]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)
    errors = list(all_metrics)
    assert len(errors) == 1
    assert (
        "`memory_distribution` is missing required parameter `memory_unit`"
        in errors[0]
    )

    # With a memory_unit the definition parses cleanly.
    contents = [
        {
            "category": {
                "metric": {
                    "type": "memory_distribution",
                    "memory_unit": "megabyte",
                }
            }
        }
    ]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)
    errors = list(all_metrics)
    assert len(errors) == 0
    assert len(all_metrics.value) == 1
    # BUG FIX: this comparison was missing its `assert`, making it a no-op
    # expression statement that could never fail.
    assert (
        all_metrics.value["category"]["metric"].memory_unit
        == metrics.MemoryUnit.megabyte
    )
def test_quantity():
    """`quantity` requires both `unit` and `gecko_datapoint`."""
    # Missing unit and gecko_datapoint each produce an error.
    contents = [{"category": {"metric": {"type": "quantity"}}}]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)
    errors = list(all_metrics)
    assert len(errors) == 2
    assert any(
        "`quantity` is missing required parameter `unit`" in err for err in errors
    )
    assert any("is only allowed for Gecko metrics" in err for err in errors)

    # A fully specified quantity parses cleanly.
    contents = [
        {
            "category": {
                "metric": {
                    "type": "quantity",
                    "unit": "pixel",
                    "gecko_datapoint": "FOO",
                }
            }
        }
    ]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)
    errors = list(all_metrics)
    assert len(errors) == 0
    assert len(all_metrics.value) == 1
    # BUG FIX: this comparison was missing its `assert`, making it a no-op
    # expression statement that could never fail.
    assert all_metrics.value["category"]["metric"].unit == "pixel"
def test_geckoview_only_on_valid_metrics():
    """`gecko_datapoint` is accepted on distribution metrics but not on events."""
    # NOTE(review): "custom_distributiuon" looks like a typo of
    # "custom_distribution", and `results` below is never asserted on, so the
    # loop only smoke-parses each definition — confirm the intended checks.
    for metric_type in [
        "timing_distribution",
        "custom_distributiuon",
        "memory_distribution",
    ]:
        definitions = [
            {
                "category1": {
                    "metric1": {"type": metric_type, "gecko_datapoint": "FOO"}
                }
            }
        ]
        definitions = [util.add_required(d) for d in definitions]
        parsed = parser.parse_objects(definitions)
        results = list(parsed)

    # An event with a gecko_datapoint is rejected.
    definitions = [
        {"category1": {"metric1": {"type": "event", "gecko_datapoint": "FOO"}}}
    ]
    definitions = [util.add_required(d) for d in definitions]
    parsed = parser.parse_objects(definitions)
    results = list(parsed)
    assert len(results) == 1
    assert "is only allowed for" in str(results[0])
def test_reserved_metrics_category():
    """
    The category "pings" can't be used by metrics -- it's reserved for pings.
    """
    definition = {"pings": {"metric": {"type": "string"}}}
    util.add_required(definition)
    errs = list(parser.parse_objects(definition))
    assert len(errs) == 1
    assert "reserved as a category name" in errs[0]
def test_invalid_names(location, name):
    """An invalid name yields exactly one error that mentions the name.

    `location` and `name` are presumably supplied by a parametrize decorator
    outside this view; the signature is unchanged.
    """
    objs = [util.add_required(entry) for entry in location(name)]
    result = parser.parse_metrics(objs)
    errs = list(result)
    assert len(errs) == 1
    assert name in errs[0]
def test_combined():
    """Several different lint checks fire at once on one metrics file."""
    definitions = [
        {
            "metrics": {
                "m_network_latency_ms": {
                    "type": "timespan",
                    "time_unit": "millisecond",
                },
                "m_memory_usage_mb": {
                    "type": "memory_distribution",
                    "memory_unit": "megabyte",
                },
                "m_width_pixels": {
                    "type": "quantity",
                    "gecko_datapoint": "WIDTH_PIXELS",
                    "unit": "pixels",
                },
            }
        }
    ]
    parsed = parser.parse_objects([util.add_required(d) for d in definitions])
    assert len(list(parsed)) == 0
    nits = lint.lint_metrics(parsed.value)
    assert len(nits) == 5
    assert {v[0] for v in nits} == {
        "COMMON_PREFIX",
        "CATEGORY_GENERIC",
        "UNIT_IN_NAME",
    }
def test_common_prefix():
    """COMMON_PREFIX fires for shared name prefixes; no_lint suppresses it."""
    definitions = [
        {
            "telemetry": {
                "network_latency": {
                    "type": "quantity",
                    "gecko_datapoint": "GC_NETWORK_LATENCY",
                    "unit": "ms",
                },
                "network_bandwidth": {
                    "type": "quantity",
                    "gecko_datapoint": "GC_NETWORK_BANDWIDTH",
                    "unit": "kbps",
                },
            }
        }
    ]
    definitions = [util.add_required(d) for d in definitions]

    parsed = parser.parse_objects(definitions)
    assert len(list(parsed)) == 0
    nits = lint.lint_metrics(parsed.value)
    assert len(nits) == 1
    assert nits[0][0] == "COMMON_PREFIX"

    # Now make sure the override works.
    definitions[0]["no_lint"] = ["COMMON_PREFIX"]
    parsed = parser.parse_objects(definitions)
    assert len(list(parsed)) == 0
    nits = lint.lint_metrics(parsed.value)
    assert len(nits) == 0
def test_merge_metrics():
    """Merge multiple metrics.yaml files"""
    # NOTE(review): a later function in this file reuses this name, so only
    # the last definition is collected by pytest — confirm that is intended.
    sources = [
        {
            'category1': {'metric1': {}, 'metric2': {}},
            'category2': {'metric3': {}},
        },
        {
            'category1': {'metric4': {}},
            'category3': {'metric5': {}},
        },
    ]
    sources = [util.add_required(s) for s in sources]
    result = parser.parse_metrics(sources)
    list(result)  # drain (and ignore) any errors
    merged = result.value
    assert set(merged['category1'].keys()) == {'metric1', 'metric2', 'metric4'}
    assert set(merged['category2'].keys()) == {'metric3'}
    assert set(merged['category3'].keys()) == {'metric5'}
def test_all_pings_reserved():
    """`all_pings` is reserved, both as a send_in_pings target and a ping name."""
    # send_in_pings: [all_pings] is only allowed for internal metrics.
    contents = [
        {
            "category": {
                "metric": {"type": "string", "send_in_pings": ["all_pings"]}
            }
        }
    ]
    contents = [util.add_required(x) for x in contents]
    all_metrics = parser.parse_objects(contents)
    errors = list(all_metrics)
    assert len(errors) == 1
    assert "On instance category.metric" in errors[0]
    assert "Only internal metrics" in errors[0]

    # The same input is accepted when reserved names are allowed.
    all_metrics = parser.parse_objects(contents, {"allow_reserved": True})
    errors = list(all_metrics)
    assert len(errors) == 0

    # A custom ping called "all_pings" is not allowed.
    contents = [{"all_pings": {"include_client_id": True}}]
    contents = [util.add_required_ping(x) for x in contents]
    all_pings = parser.parse_objects(contents)
    errors = list(all_pings)
    assert len(errors) == 1
    # BUG FIX: the original asserted on the bare string literal (always truthy);
    # the message must actually be checked against the reported error.
    assert "is not allowed for 'all_pings'" in errors[0]
def test_user_lifetime_expiration():
    """Test that expiring 'user' lifetime metrics generate a warning."""
    definitions = [
        {
            "user_data": {
                "counter": {
                    "type": "counter",
                    "lifetime": "user",
                    "expires": "2100-01-01",
                },
                "string": {
                    "type": "string",
                    "lifetime": "user",
                    "expires": "never",
                },
                "other": {"type": "string", "lifetime": "application"},
            }
        }
    ]
    parsed = parser.parse_objects([util.add_required(d) for d in definitions])
    assert len(list(parsed)) == 0
    nits = lint.lint_metrics(parsed.value)
    assert len(nits) == 1
    assert {nit.check_name for nit in nits} == {"USER_LIFETIME_EXPIRATION"}
def test_merge_metrics():
    """Merge multiple metrics.yaml files"""
    # NOTE(review): this redefines test_merge_metrics from earlier in the
    # file, so only this definition is collected by pytest — confirm intent.
    inputs = [
        {
            "category1": {"metric1": {}, "metric2": {}},
            "category2": {"metric3": {}},
        },
        {
            "category1": {"metric4": {}},
            "category3": {"metric5": {}},
        },
    ]
    inputs = [util.add_required(i) for i in inputs]
    parsed = parser.parse_objects(inputs)
    list(parsed)  # drain (and ignore) any errors
    merged = parsed.value
    assert set(merged["category1"].keys()) == {"metric1", "metric2", "metric4"}
    assert set(merged["category2"].keys()) == {"metric3"}
    assert set(merged["category3"].keys()) == {"metric5"}
def test_misspelling_pings():
    """Likely misspellings of built-in ping names are flagged by the linter."""
    definitions = [
        {
            "user_data": {
                "counter": {"type": "counter", "send_in_pings": ["metric"]},
                "string": {"type": "string", "send_in_pings": ["event"]},
                "string2": {
                    "type": "string",
                    "send_in_pings": ["metrics", "events"],
                },
            }
        }
    ]
    parsed = parser.parse_objects([util.add_required(d) for d in definitions])
    assert len(list(parsed)) == 0
    # Only "metric" and "event" (singular) should be flagged; the correctly
    # spelled names on string2 pass.
    nits = lint.lint_metrics(parsed.value)
    assert len(nits) == 2
    assert {nit.check_name for nit in nits} == {"MISSPELLED_PING"}
def test_baseline_restriction():
    """Sending user metrics in the baseline ping is flagged by the linter."""
    definitions = [
        {
            "user_data": {
                "counter": {"type": "counter", "send_in_pings": ["baseline"]},
                "string": {
                    "type": "string",
                    "send_in_pings": ["metrics", "baseline"],
                },
                "string2": {"type": "string", "send_in_pings": ["metrics"]},
            }
        }
    ]
    parsed = parser.parse_objects([util.add_required(d) for d in definitions])
    assert len(list(parsed)) == 0
    # The two metrics targeting "baseline" are flagged; string2 is not.
    nits = lint.lint_metrics(parsed.value)
    assert len(nits) == 2
    assert {nit.check_name for nit in nits} == {"BASELINE_PING"}
def test_translate_expires():
    """An `expires` value in the past disables the metric; future/never do not."""
    expirations = {
        "a": "never",
        "b": "expired",
        "c": "2000-01-01",
        "d": "2100-01-01",
    }
    definitions = [
        {
            "metrics": {
                name: {"type": "counter", "expires": when}
                for name, when in expirations.items()
            }
        }
    ]
    definitions = [util.add_required(d) for d in definitions]
    objs = parser.parse_objects(definitions)
    assert len(list(objs)) == 0
    parsed = objs.value
    assert parsed["metrics"]["a"].disabled is False
    assert parsed["metrics"]["b"].disabled is True
    assert parsed["metrics"]["c"].disabled is True
    assert parsed["metrics"]["d"].disabled is False
def test_multiple_errors():
    """Make sure that if there are multiple errors, we get all of them."""
    # Both the camelCase category name and the unknown type are errors.
    # (Local renamed from `metrics` to avoid shadowing the metrics module.)
    bad = [util.add_required({"camelCaseName": {"metric": {"type": "unknown"}}})]
    result = parser.parse_objects(bad)
    assert len(list(result)) == 2
def test_merge_metrics_clash():
    """Merge multiple metrics.yaml files with conflicting metric names."""
    sources = [
        {"category1": {"metric1": {}}},
        {"category1": {"metric1": {}}},
    ]
    result = parser.parse_objects([util.add_required(s) for s in sources])
    errs = list(result)
    assert len(errs) == 1
    assert "Duplicate metric name" in errs[0]
def test_required_denominator():
    """denominator is required on use_counter"""
    definition = util.add_required(
        {"category": {"metric": {"type": "use_counter"}}}
    )
    errs = list(parser.parse_objects([definition]))
    assert len(errs) == 1
    assert "denominator is required" in errs[0]
def test_timing_distribution_unit_default():
    """A timing_distribution with no time_unit defaults to nanosecond."""
    definition = util.add_required(
        {"category1": {"metric1": {"type": "timing_distribution"}}}
    )
    result = parser.parse_objects([definition])
    assert len(list(result)) == 0
    metric = result.value["category1"]["metric1"]
    assert metric.time_unit == metrics.TimeUnit.nanosecond
def test_snake_case_enforcement():
    """Expect exception if names aren't in snake case."""
    # One bad category name, one bad metric name — each yields one error.
    bad_inputs = [
        {"categoryWithCamelCase": {"metric": {}}},
        {"category": {"metricWithCamelCase": {}}},
    ]
    for bad in bad_inputs:
        util.add_required(bad)
        assert len(list(parser._load_file(bad))) == 1
def test_snake_case_enforcement():
    """Expect exception if names aren't in snake case."""
    # NOTE(review): this redefines test_snake_case_enforcement from earlier in
    # the file, so only this definition is collected — confirm intent.
    for bad in [
        {'categoryWithCamelCase': {'metric': {}}},
        {'category': {'metricWithCamelCase': {}}},
    ]:
        util.add_required(bad)
        errors = list(parser._load_metrics_file(bad))
        assert len(errors) == 1
def test_parser_reserved():
    """Categories under "glean." are reserved unless allow_reserved is set."""
    definitions = [
        util.add_required({"glean.baseline": {"metric": {"type": "string"}}})
    ]

    errs = list(parser.parse_objects(definitions))
    assert len(errs) == 1
    assert "For category 'glean.baseline'" in errs[0]

    errs = list(parser.parse_objects(definitions, {"allow_reserved": True}))
    assert len(errs) == 0
def test_event_must_be_ping_lifetime():
    """An event metric with a non-ping lifetime is rejected."""
    definitions = [
        {"category": {"metric": {"type": "event", "lifetime": "user"}}}
    ]
    result = parser.parse_objects([util.add_required(d) for d in definitions])
    errs = list(result)
    assert len(errs) == 1
    assert "Event metrics must have ping lifetime" in errs[0]
def test_no_schema():
    """Expect error if no $schema specified in the input file."""
    content = util.add_required({'category1': {'metric1': {}}})
    # Strip the $schema key that add_required put in.
    del content['$schema']
    errors = list(parser._load_metrics_file(content))
    assert len(errors) == 1
    assert '$schema key must be set to' in errors[0]
def test_multiple_errors():
    """Make sure that if there are multiple errors, we get all of them."""
    # NOTE(review): this redefines test_multiple_errors from earlier in the
    # file (with a different expected count), so only this one is collected.
    # (Local renamed from `metrics` to avoid shadowing the metrics module.)
    bad = [util.add_required({'camelCaseName': {'metric': {'type': 'unknown'}}})]
    result = parser.parse_metrics(bad)
    assert len(list(result)) == 3
def test_required_denominator():
    """denominator is required on use_counter"""
    # NOTE(review): this redefines test_required_denominator from earlier in
    # the file, so only this definition is collected — confirm intent.
    definitions = [
        util.add_required({'category': {'metric': {'type': 'use_counter'}}})
    ]
    errs = list(parser.parse_metrics(definitions))
    assert len(errs) == 1
    assert 'denominator is required' in errs[0]
def test_event_must_be_ping_lifetime():
    """A non-ping lifetime on an event is rejected by the schema."""
    # NOTE(review): this redefines test_event_must_be_ping_lifetime from
    # earlier in the file, so only this definition is collected.
    definitions = [
        util.add_required(
            {'category': {'metric': {'type': 'event', 'lifetime': 'user'}}}
        )
    ]
    errs = list(parser.parse_metrics(definitions))
    assert len(errs) == 1
    assert "On instance['category']['metric']['lifetime']" in errs[0]
def test_user_and_application_exclusive():
    """user_property and application_property may not both be true"""
    definitions = [
        util.add_required(
            {
                'category': {
                    'metric': {
                        'user_property': True,
                        'application_property': True,
                    }
                }
            }
        )
    ]
    errs = list(parser.parse_metrics(definitions))
    assert len(errs) == 1
    assert 'may not both be true' in errs[0]
def test_do_not_disable_expired():
    """With do_not_disable_expired, an expired metric stays enabled."""
    # (Local renamed from `metrics` to avoid shadowing the metrics module.)
    definitions = [
        {"category": {"metric": {"type": "boolean", "expires": "1900-01-01"}}}
    ]
    definitions = [util.add_required(d) for d in definitions]
    result = parser.parse_objects(definitions, {"do_not_disable_expired": True})
    assert len(list(result)) == 0
    parsed = result.value
    assert parsed["category"]["metric"].disabled is False
def test_translate_send_in_pings(tmpdir):
    """"default" in send_in_pings resolves to the metric type's default ping."""
    definitions = [
        {
            "baseline": {
                "counter": {"type": "counter"},
                "event": {"type": "event"},
                "c": {
                    "type": "counter",
                    "send_in_pings": ["default", "custom"],
                },
            }
        }
    ]
    result = parser.parse_objects([util.add_required(d) for d in definitions])
    assert len(list(result)) == 0
    parsed = result.value
    assert parsed["baseline"]["counter"].send_in_pings == ["metrics"]
    assert parsed["baseline"]["event"].send_in_pings == ["events"]
    assert parsed["baseline"]["c"].send_in_pings == ["custom", "metrics"]
def test_merge_metrics_clash():
    """Merge multiple metrics.yaml files with conflicting metric names."""
    # NOTE(review): this redefines test_merge_metrics_clash from earlier in
    # the file, so only this definition is collected — confirm intent.
    sources = [
        {'category1': {'metric1': {}}},
        {'category1': {'metric1': {}}},
    ]
    sources = [util.add_required(s) for s in sources]
    errs = list(parser.parse_metrics(sources))
    assert len(errs) == 1
    assert 'Duplicate metric name' in errs[0]