def export_qids_to_qname_qtype(qids: Set[QID], lmdb, file=sys.stdout):
    for qid, qwire in get_query_iterator(lmdb, qids):
        try:
            query = qwire_to_qname_qtype(qwire)
        except ValueError as exc:
            logging.debug('Omitting QID %d from export: %s', qid, exc)
        else:
            print(query, file=file)
def export_qids_to_qname(qids: Set[QID], lmdb, file=sys.stdout):
    domains = set()  # type: Set[str]
    for qid, qwire in get_query_iterator(lmdb, qids):
        try:
            qname = qwire_to_qname(qwire)
        except ValueError as exc:
            logging.debug('Omitting QID %d from export: %s', qid, exc)
        else:
            if qname not in domains:  # export each qname only once
                print(qname, file=file)
                domains.add(qname)
def export_qids_to_base64url(qids: Set[QID], lmdb, file=sys.stdout):
    wires = set()  # type: Set[bytes]
    for _, qwire in get_query_iterator(lmdb, qids):
        if qwire not in wires:
            print(base64.urlsafe_b64encode(qwire).decode('ascii'), file=file)
            wires.add(qwire)
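
# Hypothetical convenience wrapper (not part of the original module): a minimal
# sketch of how the three exporters above could be selected by a format name.
# The EXPORT_FORMATS mapping and export_qids() helper are illustrative names,
# assuming only the functions defined above and the Set/QID types they use.
EXPORT_FORMATS = {
    'qname+qtype': export_qids_to_qname_qtype,
    'qname': export_qids_to_qname,
    'base64url': export_qids_to_base64url,
}


def export_qids(qids: Set[QID], lmdb, format_name: str, file=sys.stdout):
    """Sketch: dispatch to the exporter registered for format_name."""
    try:
        exporter = EXPORT_FORMATS[format_name]
    except KeyError:
        raise ValueError('unknown export format: {}'.format(format_name))
    exporter(qids, lmdb, file=file)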
def main():
    cli.setup_logging()
    parser = argparse.ArgumentParser(description='compare two diff summaries')
    cli.add_arg_config(parser)
    parser.add_argument('old_datafile', type=str,
                        help='report to compare against')
    parser.add_argument('new_datafile', type=str,
                        help='report to evaluate')
    cli.add_arg_envdir(parser)  # TODO remove when we no longer need to read queries from lmdb
    cli.add_arg_limit(parser)

    args = parser.parse_args()
    report = DiffReport.from_json(cli.get_datafile(args, key='new_datafile'))
    field_weights = args.cfg['report']['field_weights']
    ref_report = DiffReport.from_json(cli.get_datafile(args, key='old_datafile'))

    check_report_summary(report)
    check_report_summary(ref_report)
    check_usable_answers(report, ref_report)

    cli.print_global_stats(report, ref_report)
    cli.print_differences_stats(report, ref_report)

    if report.summary or ref_report.summary:  # when there are any differences to report
        field_counters = report.summary.get_field_counters()
        ref_field_counters = ref_report.summary.get_field_counters()

        # make sure "disappeared" fields show up as well
        for field in ref_field_counters:
            if field not in field_counters:
                field_counters[field] = Counter()

        cli.print_fields_overview(
            field_counters, len(report.summary), ref_field_counters)

        for field in field_weights:
            if field in field_counters:
                counter = field_counters[field]
                ref_counter = ref_field_counters.get(field, Counter())

                # make sure "disappeared" mismatches show up as well
                for mismatch in ref_counter:
                    if mismatch not in counter:
                        counter[mismatch] = 0

                cli.print_field_mismatch_stats(
                    field, counter, len(report.summary), ref_counter)

        # query details
        with LMDB(args.envdir, readonly=True) as lmdb:
            lmdb.open_db(LMDB.QUERIES)
            queries_all = convert_queries(
                get_query_iterator(lmdb, report.summary.keys()))
            ref_queries_all = convert_queries(
                get_query_iterator(lmdb, ref_report.summary.keys()))

            for field in field_weights:
                if field in field_counters:
                    # ensure "disappeared" mismatches are shown
                    field_mismatches = dict(
                        report.summary.get_field_mismatches(field))
                    ref_field_mismatches = dict(
                        ref_report.summary.get_field_mismatches(field))
                    mismatches = set(field_mismatches.keys())
                    mismatches.update(ref_field_mismatches.keys())

                    for mismatch in mismatches:
                        qids = field_mismatches.get(mismatch, set())
                        queries = convert_queries(
                            get_query_iterator(lmdb, qids))
                        ref_queries = convert_queries(
                            get_query_iterator(
                                lmdb, ref_field_mismatches.get(mismatch, set())))
                        cli.print_mismatch_queries(
                            field,
                            mismatch,
                            get_printable_queries_format(
                                queries, queries_all, ref_queries, ref_queries_all),
                            args.limit)
def main():
    cli.setup_logging()
    args = parse_args()
    datafile = cli.get_datafile(args)
    report = DiffReport.from_json(datafile)
    field_weights = args.cfg['report']['field_weights']

    check_args(args, report)

    ignore_qids = set()
    if args.without_ref_unstable or args.without_ref_failing:
        try:
            stats = cli.read_stats(args.stats_filename)
        except ValueError as exc:
            logging.critical(str(exc))
            sys.exit(1)
        if args.without_ref_unstable:
            ignore_qids.update(stats.queries.unstable)
        if args.without_ref_failing:
            ignore_qids.update(stats.queries.failing)

    report = DiffReport.from_json(datafile)
    report.summary = Summary.from_report(
        report, field_weights,
        without_diffrepro=args.without_diffrepro,
        ignore_qids=ignore_qids)

    # dnsviz filter: by domain -> need to iterate over disagreements to get QIDs
    if args.without_dnsviz_errors:
        try:
            dnsviz_grok = DnsvizGrok.from_json(args.dnsviz)
        except (FileNotFoundError, RuntimeError) as exc:
            logging.critical('Failed to load dnsviz data: %s', exc)
            sys.exit(1)

        error_domains = dnsviz_grok.error_domains()
        with LMDB(args.envdir, readonly=True) as lmdb:
            lmdb.open_db(LMDB.QUERIES)
            # match domain, add QID to ignore
            for qid, wire in get_query_iterator(lmdb, report.summary.keys()):
                msg = dns.message.from_wire(wire)
                if msg.question:
                    if any(msg.question[0].name.is_subdomain(name)
                           for name in error_domains):
                        ignore_qids.add(qid)

        report.summary = Summary.from_report(
            report, field_weights,
            without_diffrepro=args.without_diffrepro,
            ignore_qids=ignore_qids)

    cli.print_global_stats(report)
    cli.print_differences_stats(report)

    if report.summary:  # when there are any differences to report
        field_counters = report.summary.get_field_counters()
        cli.print_fields_overview(field_counters, len(report.summary))
        for field in field_weights:
            if field in report.summary.field_labels:
                cli.print_field_mismatch_stats(
                    field, field_counters[field], len(report.summary))

        # query details
        with LMDB(args.envdir, readonly=True) as lmdb:
            lmdb.open_db(LMDB.QUERIES)
            for field in field_weights:
                if field in report.summary.field_labels:
                    for mismatch, qids in report.summary.get_field_mismatches(field):
                        queries = convert_queries(
                            get_query_iterator(lmdb, qids))
                        cli.print_mismatch_queries(
                            field,
                            mismatch,
                            get_printable_queries_format(queries),
                            args.limit)

    report.export_json(datafile)
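
# Standard script entry point, assumed here for completeness: the excerpt
# defines main() but does not show how the script is invoked.
if __name__ == '__main__':
    main()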