Example #1
def main():
    cli.setup_logging()
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description='Convert queries data from standard input and store '
        'wire format into LMDB "queries" DB.')
    cli.add_arg_envdir(parser)
    parser.add_argument(
        '-f',
        '--in-format',
        type=str,
        choices=['text', 'pcap'],
        default='text',
        help='input data format; default is "text"\n'
        'Expected input for "text" is: "<qname> <RR type>", '
        'one query per line.\n'
        'Expected input for "pcap" is the content of a pcap file.')
    parser.add_argument('--pcap-file', type=argparse.FileType('rb'))

    args = parser.parse_args()

    if args.in_format == 'text' and args.pcap_file:
        logging.critical(
            "Argument --pcap-file can be use only in combination with -f pcap")
        sys.exit(1)
    if args.in_format == 'pcap' and not args.pcap_file:
        logging.critical("Missing path to pcap file, use argument --pcap-file")
        sys.exit(1)

    with LMDB(args.envdir) as lmdb:
        qdb = lmdb.open_db(LMDB.QUERIES, create=True, check_notexists=True)
        txn = lmdb.env.begin(qdb, write=True)
        try:
            # workers ignore SIGINT so the parent process can handle the
            # interrupt and still commit the transaction in `finally`
            with pool.Pool(initializer=lambda: signal.signal(
                    signal.SIGINT, signal.SIG_IGN)) as workers:
                if args.in_format == 'text':
                    data_stream = read_lines(sys.stdin)
                    method = wrk_process_line
                elif args.in_format == 'pcap':
                    data_stream = parse_pcap(args.pcap_file)
                    method = wrk_process_frame
                else:
                    logging.error('unknown in-format, use "text" or "pcap"')
                    sys.exit(1)
                for qid, wire in workers.imap(method,
                                              data_stream,
                                              chunksize=1000):
                    if qid is not None:
                        key = qid2key(qid)
                        txn.put(key, wire)
        except KeyboardInterrupt:
            logging.info('SIGINT received, exiting...')
            sys.exit(130)
        except RuntimeError as err:
            logging.error(err)
            sys.exit(1)
        finally:
            # attempt to preserve data if something went wrong (or not)
            logging.debug('Committing LMDB transaction...')
            txn.commit()
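
The example relies on a qid2key() helper that is not shown here. A minimal sketch of what it might look like, assuming the query ID is a plain integer (the exact byte order used by the project is an assumption):

import struct

def qid2key(qid: int) -> bytes:
    # Hypothetical: pack the integer query ID into a fixed-width
    # 4-byte LMDB key, e.g. qid2key(1) == b'\x01\x00\x00\x00'.
    return struct.pack('<I', qid)
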
Example #2
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description='create a summary report from gathered data stored in '
        'LMDB and in the JSON datafile')
    cli.add_arg_envdir(parser)
    cli.add_arg_config(parser)
    cli.add_arg_datafile(parser)
    cli.add_arg_limit(parser)
    cli.add_arg_stats_filename(parser, default='')
    cli.add_arg_dnsviz(parser, default='')
    parser.add_argument(
        '--without-dnsviz-errors',
        action='store_true',
        help='omit domains that have any errors in DNSViz results')
    parser.add_argument('--without-diffrepro',
                        action='store_true',
                        help='omit reproducibility data from summary')
    parser.add_argument('--without-ref-unstable',
                        action='store_true',
                        help='omit unstable reference queries from summary')
    parser.add_argument('--without-ref-failing',
                        action='store_true',
                        help='omit failing reference queries from summary')

    return parser.parse_args()
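
The cli.add_arg_* helpers shared by all of these examples are not shown. They presumably attach one common option each to the given parser; a hedged sketch of the pattern (argument names and defaults are assumptions):

import argparse

def add_arg_envdir(parser: argparse.ArgumentParser) -> None:
    # Hypothetical: every tool takes the LMDB environment directory
    # as a positional argument.
    parser.add_argument('envdir', type=str,
                        help='LMDB environment directory')

def add_arg_limit(parser: argparse.ArgumentParser) -> None:
    # Hypothetical: cap the number of items printed per category.
    parser.add_argument('-l', '--limit', type=int, default=10,
                        help='number of items to print (0 = unlimited)')
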
Example #3
def main():
    cli.setup_logging()
    parser = argparse.ArgumentParser(
        description='read queries from LMDB, send them in parallel to servers '
        'listed in configuration file, and record answers into LMDB')
    cli.add_arg_envdir(parser)
    cli.add_arg_config(parser)
    parser.add_argument(
        '--ignore-timeout',
        action="store_true",
        help='continue despite consecutive timeouts from resolvers')

    args = parser.parse_args()
    sendrecv.module_init(args)

    with LMDB(args.envdir) as lmdb:
        meta = MetaDatabase(lmdb, args.cfg['servers']['names'], create=True)
        meta.write_version()
        meta.write_start_time()

        lmdb.open_db(LMDB.QUERIES)
        adb = lmdb.open_db(LMDB.ANSWERS, create=True, check_notexists=True)

        qstream = lmdb.key_value_stream(LMDB.QUERIES)
        txn = lmdb.env.begin(adb, write=True)
        try:
            # process queries in parallel
            with pool.Pool(processes=args.cfg['sendrecv']['jobs'],
                           initializer=sendrecv.worker_init) as p:
                i = 0
                for qkey, blob in p.imap(sendrecv.worker_perform_query,
                                         qstream,
                                         chunksize=100):
                    i += 1
                    if i % 10000 == 0:
                        logging.info('Received {:d} answers'.format(i))
                    txn.put(qkey, blob)
        except KeyboardInterrupt:
            logging.info('SIGINT received, exiting...')
            sys.exit(130)
        except RuntimeError as err:
            logging.error(err)
            sys.exit(1)
        finally:
            # attempt to preserve data if something went wrong (or not)
            logging.debug('Committing LMDB transaction...')
            txn.commit()
            meta.write_end_time()
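
The LMDB wrapper class is project-specific, but the underlying storage is a standard LMDB environment, so the recorded answers can be read back with the plain lmdb bindings. A minimal sketch (the database name 'answers' is an assumption mirroring LMDB.ANSWERS):

import lmdb

def iter_answers(envdir: str):
    # Open the environment read-only and stream (key, wire blob) pairs.
    env = lmdb.open(envdir, max_dbs=5, readonly=True)
    adb = env.open_db(b'answers')
    with env.begin(adb) as txn:
        for key, blob in txn.cursor():
            yield key, blob
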
Example #4
def main():
    cli.setup_logging()
    parser = argparse.ArgumentParser(
        description='attempt to reproduce original diffs from JSON report')
    cli.add_arg_envdir(parser)
    cli.add_arg_config(parser)
    cli.add_arg_datafile(parser)
    parser.add_argument('--sequential', action='store_true', default=False,
                        help='send one query at a time (slower, but more reliable)')

    args = parser.parse_args()
    sendrecv.module_init(args)
    datafile = cli.get_datafile(args)
    report = DiffReport.from_json(datafile)
    restart_scripts = repro.get_restart_scripts(args.cfg)
    servers = args.cfg['servers']['names']
    dnsreplies_factory = DNSRepliesFactory(servers)

    if args.sequential:
        nproc = 1
    else:
        nproc = args.cfg['sendrecv']['jobs']

    if report.reprodata is None:
        report.reprodata = ReproData()

    with LMDB(args.envdir, readonly=True) as lmdb:
        lmdb.open_db(LMDB.QUERIES)
        cli.check_metadb_servers_version(lmdb, servers)

        dstream = repro.query_stream_from_disagreements(lmdb, report)
        try:
            repro.reproduce_queries(
                dstream, report, dnsreplies_factory, args.cfg['diff']['criteria'],
                args.cfg['diff']['target'], restart_scripts, nproc)
        finally:
            # make sure data is saved in case of interrupt
            report.export_json(datafile)
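
The try/finally around reproduce_queries() is the same "commit whatever we have" pattern used by the other tools: results gathered so far are flushed to disk even when the run is interrupted. A stripped-down, self-contained illustration of the idea:

import json

def run_with_checkpoint(items, outfile: str):
    # Illustrative only: persist partial results even if the loop
    # raises (e.g. on KeyboardInterrupt), mirroring the code above.
    results = {}
    try:
        for key, value in items:
            results[key] = value
    finally:
        with open(outfile, 'w') as f:
            json.dump(results, f)
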
Example #5
def main():
    global lmdb

    cli.setup_logging()
    parser = argparse.ArgumentParser(
        description='compute diff from answers stored in LMDB '
        'and write diffs to LMDB')
    cli.add_arg_envdir(parser)
    cli.add_arg_config(parser)
    cli.add_arg_datafile(parser)

    args = parser.parse_args()
    datafile = cli.get_datafile(args, check_exists=False)
    criteria = args.cfg['diff']['criteria']
    target = args.cfg['diff']['target']
    servers = args.cfg['servers']['names']

    with LMDB(args.envdir) as lmdb_:
        # NOTE: To avoid an lmdb.BadRslotError, probably caused by weird
        # interactions between multiple transactions / processes, open a
        # separate environment. Also, all DBs have to be opened before
        # using MetaDatabase().
        report = prepare_report(lmdb_, servers)
        cli.check_metadb_servers_version(lmdb_, servers)

    with LMDB(args.envdir, fast=True) as lmdb_:
        lmdb = lmdb_
        lmdb.open_db(LMDB.ANSWERS)
        lmdb.open_db(LMDB.DIFFS, create=True, drop=True)
        qid_stream = lmdb.key_stream(LMDB.ANSWERS)

        dnsreplies_factory = DNSRepliesFactory(servers)
        compare_func = partial(compare_lmdb_wrapper, criteria, target,
                               dnsreplies_factory)
        with pool.Pool() as p:
            for _ in p.imap_unordered(compare_func, qid_stream, chunksize=10):
                pass
        export_json(datafile, report)
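
compare_lmdb_wrapper() takes several per-run arguments plus the per-item QID, while Pool.imap_unordered() calls its function with a single argument; functools.partial pins the fixed arguments so only the QID varies. The same pattern in isolation (compare() is a stand-in, not the project's function):

from functools import partial
from multiprocessing import Pool

def compare(criteria, target, qid):
    # Stand-in worker: criteria and target are fixed per run,
    # only qid changes per work item.
    return (qid, criteria, target)

if __name__ == '__main__':
    compare_func = partial(compare, ['opcode', 'rcode'], 'target-server')
    with Pool() as p:
        for result in p.imap_unordered(compare_func, range(5), chunksize=2):
            print(result)
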
Example #6
def main():
    cli.setup_logging()
    parser = argparse.ArgumentParser(description='compare two diff summaries')
    cli.add_arg_config(parser)
    parser.add_argument('old_datafile',
                        type=str,
                        help='report to compare against')
    parser.add_argument('new_datafile',
                        type=str,
                        help='report to evaluate')
    cli.add_arg_envdir(
        parser)  # TODO remove when we no longer need to read queries from lmdb
    cli.add_arg_limit(parser)

    args = parser.parse_args()
    report = DiffReport.from_json(cli.get_datafile(args, key='new_datafile'))
    field_weights = args.cfg['report']['field_weights']
    ref_report = DiffReport.from_json(
        cli.get_datafile(args, key='old_datafile'))

    check_report_summary(report)
    check_report_summary(ref_report)
    check_usable_answers(report, ref_report)

    cli.print_global_stats(report, ref_report)
    cli.print_differences_stats(report, ref_report)

    if report.summary or ref_report.summary:  # when there are any differences to report
        field_counters = report.summary.get_field_counters()
        ref_field_counters = ref_report.summary.get_field_counters()

        # make sure "disappeared" fields show up as well
        for field in ref_field_counters:
            if field not in field_counters:
                field_counters[field] = Counter()

        cli.print_fields_overview(field_counters, len(report.summary),
                                  ref_field_counters)

        for field in field_weights:
            if field in field_counters:
                counter = field_counters[field]
                ref_counter = ref_field_counters.get(field, Counter())

                # make sure "disappeared" mismatches show up as well
                for mismatch in ref_counter:
                    if mismatch not in counter:
                        counter[mismatch] = 0

                cli.print_field_mismatch_stats(field, counter,
                                               len(report.summary),
                                               ref_counter)

        # query details
        with LMDB(args.envdir, readonly=True) as lmdb:
            lmdb.open_db(LMDB.QUERIES)

            queries_all = convert_queries(
                get_query_iterator(lmdb, report.summary.keys()))
            ref_queries_all = convert_queries(
                get_query_iterator(lmdb, ref_report.summary.keys()))

            for field in field_weights:
                if field in field_counters:
                    # ensure "disappeared" mismatches are shown
                    field_mismatches = dict(
                        report.summary.get_field_mismatches(field))
                    ref_field_mismatches = dict(
                        ref_report.summary.get_field_mismatches(field))
                    mismatches = set(field_mismatches.keys())
                    mismatches.update(ref_field_mismatches.keys())

                    for mismatch in mismatches:
                        qids = field_mismatches.get(mismatch, set())
                        queries = convert_queries(
                            get_query_iterator(lmdb, qids))
                        ref_queries = convert_queries(
                            get_query_iterator(
                                lmdb,
                                ref_field_mismatches.get(mismatch, set())))
                        cli.print_mismatch_queries(
                            field, mismatch,
                            get_printable_queries_format(
                                queries, queries_all, ref_queries,
                                ref_queries_all), args.limit)
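
The zero-filling of Counter objects above makes sure that a field or mismatch present only in the reference report still shows up in the output, with a count of 0. In isolation:

from collections import Counter

counter = Counter({'timeout': 3})
ref_counter = Counter({'timeout': 5, 'malformed': 2})

# mismatches that disappeared from the new report still show up
for mismatch in ref_counter:
    if mismatch not in counter:
        counter[mismatch] = 0

print(counter)  # Counter({'timeout': 3, 'malformed': 0})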