def test_all_metric_types():
    """Honestly, this is a pretty bad test.

    It generates Rust for a given test metrics.yaml and compares it
    byte-for-byte with an expected output Rust file. Expect it to be fragile.

    To generate a new expected output file, copy the test yaml over the one
    in t/c/g, run mach build, then copy the rust output from
    objdir/t/c/g/api/src/.
    """
    options = {"allow_reserved": False}
    input_files = [
        Path(path.join(path.dirname(__file__), "metrics_test.yaml"))
    ]

    all_objs = parser.parse_objects(input_files, options)
    assert not util.report_validation_errors(all_objs)
    assert not lint.lint_metrics(all_objs.value, options)

    output_fd = io.StringIO()
    rust.output_rust(all_objs.value, output_fd, options)

    with open(path.join(path.dirname(__file__), "metrics_test_output"), 'r') as file:
        EXPECTED_RUST = file.read()

    assert output_fd.getvalue() == EXPECTED_RUST
def main(output_fd, metrics_index_path, which_array):
    # Source the list of input files from `metrics_index.py`
    sys.path.append(str(Path(metrics_index_path).parent))
    from metrics_index import METRICS, PINGS

    if which_array == 'METRICS':
        input_files = METRICS
    elif which_array == 'PINGS':
        input_files = PINGS
    else:
        print("Build system's asking for unknown array {}".format(which_array))
        sys.exit(1)

    # Derived heavily from glean_parser.translate.translate.
    # Adapted to how mozbuild sends us a fd.
    options = {"allow_reserved": False}
    input_files = [Path(x) for x in input_files]

    all_objs = parser.parse_objects(input_files, options)
    if util.report_validation_errors(all_objs):
        sys.exit(1)
    if lint.lint_metrics(all_objs.value, options):
        # Treat Warnings as Errors in FOG
        sys.exit(1)

    rust.output_rust(all_objs.value, output_fd, options)
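# A minimal sketch of the `metrics_index.py` module the `main` above imports.
# Only the shape matters here: two lists of yaml paths named METRICS and PINGS.
# The entries below are placeholders, not the real index contents.
METRICS = [
    "path/to/some/component/metrics.yaml",  # hypothetical entry
    "path/to/another/component/metrics.yaml",  # hypothetical entry
]

PINGS = [
    "path/to/some/component/pings.yaml",  # hypothetical entry
]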
def main(output_fd, *metrics_yamls):
    # Derived heavily from glean_parser.translate.translate.
    # Adapted to how mozbuild sends us a fd.
    options = {"allow_reserved": False}
    input_files = [Path(x) for x in metrics_yamls]

    all_objs = parser.parse_objects(input_files, options)
    if util.report_validation_errors(all_objs):
        sys.exit(1)
    if lint.lint_metrics(all_objs.value, options):
        # Treat Warnings as Errors in FOG
        sys.exit(1)

    rust.output_rust(all_objs.value, output_fd, options)
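# A hedged usage sketch of the `main` above: the build hands it a writable fd
# plus yaml paths, but an in-memory buffer works the same way for local checks.
# The yaml path below is illustrative, not a path this module requires.
if __name__ == "__main__":
    buffer = io.StringIO()
    main(buffer, "path/to/some/component/metrics.yaml")  # hypothetical path
    generated_rust = buffer.getvalue()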
def test_fake_pings():
    """Another similarly-bad test.

    It generates Rust for pings_test.yaml, comparing it byte-for-byte
    with an expected output Rust file. Expect it to be fragile.

    To generate new expected output files, set `UPDATE_EXPECT=1` when running
    the test suite:

        UPDATE_EXPECT=1 mach test toolkit/components/glean/pytest
    """
    options = {"allow_reserved": False}
    input_files = [Path(path.join(path.dirname(__file__), "pings_test.yaml"))]

    all_objs, options = run_glean_parser.parse_with_options(
        input_files, options)

    output_fd = io.StringIO()
    rust.output_rust(all_objs, output_fd, options)

    expect(path.join(path.dirname(__file__), "pings_test_output"),
           output_fd.getvalue())
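# A minimal sketch (not the real helper) of an `expect` function consistent
# with how the tests in this file use it: with UPDATE_EXPECT=1 it rewrites the
# expected-output file first, otherwise it just compares against the file.
import os


def expect(expected_path, actual):
    # Assumed behaviour: regenerate the expected file when asked to.
    if os.environ.get("UPDATE_EXPECT") == "1":
        with open(expected_path, "w") as file:
            file.write(actual)

    with open(expected_path, "r") as file:
        expected = file.read()

    assert actual == expected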
def test_all_metric_types():
    """Honestly, this is a pretty bad test.

    It generates Rust for a given test metrics.yaml and compares it
    byte-for-byte with an expected output Rust file. Expect it to be fragile.

    To generate new expected output files, set `UPDATE_EXPECT=1` when running
    the test suite:

        UPDATE_EXPECT=1 mach test toolkit/components/glean/pytest
    """
    options = {"allow_reserved": False}
    input_files = [
        Path(path.join(path.dirname(__file__), "metrics_test.yaml"))
    ]

    all_objs, options = run_glean_parser.parse_with_options(
        input_files, options)

    output_fd = io.StringIO()
    rust.output_rust(all_objs, output_fd, options)

    expect(path.join(path.dirname(__file__), "metrics_test_output"),
           output_fd.getvalue())
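# A hedged sketch of `run_glean_parser.parse_with_options`, inferred from the
# parse/validate/lint sequence in the `main` variants in this file; the real
# helper's error handling (plain exceptions here) is an assumption.
def parse_with_options(input_files, options):
    all_objs = parser.parse_objects(input_files, options)
    if util.report_validation_errors(all_objs):
        raise Exception("Validation errors in the input yaml")  # assumed behaviour
    if lint.lint_metrics(all_objs.value, options):
        # Treat Warnings as Errors in FOG
        raise Exception("Lint warnings in the input yaml")  # assumed behaviour
    # The tests pass the returned objects straight to rust.output_rust,
    # so this presumably returns all_objs.value rather than all_objs.
    return all_objs.value, options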
def main(output_fd, _metrics_index, *args):
    all_objs, options = parse(args)
    rust.output_rust(all_objs, output_fd, options)
def main(output_fd, *args):
    args = args[DEPS_LEN:]
    all_objs, options = parse(args)
    rust.output_rust(all_objs, output_fd, options)
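# A hypothetical sketch of the convention implied by the last two `main`
# variants: the build system prepends DEPS_LEN dependency arguments (such as
# the metrics index) ahead of the yaml paths, and `parse` wraps the
# parse/validate/lint sequence shown earlier in this file. The DEPS list and
# the body of `parse` below are assumptions, not the real module's code.
DEPS = ["metrics_index.py"]  # hypothetical dependency list
DEPS_LEN = len(DEPS)


def parse(args):
    options = {"allow_reserved": False}
    input_files = [Path(x) for x in args]

    all_objs = parser.parse_objects(input_files, options)
    if util.report_validation_errors(all_objs):
        sys.exit(1)
    if lint.lint_metrics(all_objs.value, options):
        # Treat Warnings as Errors in FOG
        sys.exit(1)

    return all_objs.value, options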