Example 1
def main():
    cli.setup_logging()
    parser = argparse.ArgumentParser(description=(
        "Plot and compare reports against statistical data. "
        "Returns non-zero exit code if any threshold is exceeded."))

    cli.add_arg_stats(parser)
    cli.add_arg_report(parser)
    cli.add_arg_config(parser)
    parser.add_argument(
        '-l',
        '--label',
        default='fields_overview',
        help='Set plot label. It is also used for the filename.')

    args = parser.parse_args()
    sumstats = args.stats
    field_weights = args.cfg['report']['field_weights']

    try:
        summaries = cli.load_summaries(args.report)
    except ValueError:
        sys.exit(1)

    logging.info('Start Comparison: %s', args.label)
    passed = plot_overview(sumstats, field_weights, summaries, args.label)

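    # per the description above: non-zero exit (3) signals that a threshold
    # check failed, 0 means the comparison passed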
    if not passed:
        sys.exit(3)
    else:
        sys.exit(0)
Example 2
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(
        description='create a summary report from gathered data stored in LMDB '
        'and JSON datafile')
    cli.add_arg_envdir(parser)
    cli.add_arg_config(parser)
    cli.add_arg_datafile(parser)
    cli.add_arg_limit(parser)
    cli.add_arg_stats_filename(parser, default='')
    cli.add_arg_dnsviz(parser, default='')
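    # optional filters: each --without-* flag narrows what ends up in the summary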
    parser.add_argument(
        '--without-dnsviz-errors',
        action='store_true',
        help='omit domains that have any errors in DNSViz results')
    parser.add_argument('--without-diffrepro',
                        action='store_true',
                        help='omit reproducibility data from summary')
    parser.add_argument('--without-ref-unstable',
                        action='store_true',
                        help='omit unstable reference queries from summary')
    parser.add_argument('--without-ref-failing',
                        action='store_true',
                        help='omit failing reference queries from summary')

    return parser.parse_args()
Example 3
def main():
    cli.setup_logging()
    parser = argparse.ArgumentParser(
        description='read queries from LMDB, send them in parallel to servers '
        'listed in the configuration file, and record answers into LMDB')
    cli.add_arg_envdir(parser)
    cli.add_arg_config(parser)
    parser.add_argument(
        '--ignore-timeout',
        action="store_true",
        help='continue despite consecutive timeouts from resolvers')

    args = parser.parse_args()
    sendrecv.module_init(args)

    with LMDB(args.envdir) as lmdb:
        meta = MetaDatabase(lmdb, args.cfg['servers']['names'], create=True)
        meta.write_version()
        meta.write_start_time()

        lmdb.open_db(LMDB.QUERIES)
        adb = lmdb.open_db(LMDB.ANSWERS, create=True, check_notexists=True)

        qstream = lmdb.key_value_stream(LMDB.QUERIES)
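        # a single write transaction collects all answers; it is committed in
        # the finally block below so partial results survive an interruption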
        txn = lmdb.env.begin(adb, write=True)
        try:
            # process queries in parallel
            with pool.Pool(processes=args.cfg['sendrecv']['jobs'],
                           initializer=sendrecv.worker_init) as p:
                i = 0
                for qkey, blob in p.imap(sendrecv.worker_perform_query,
                                         qstream,
                                         chunksize=100):
                    i += 1
                    if i % 10000 == 0:
                        logging.info('Received %d answers', i)
                    txn.put(qkey, blob)
        except KeyboardInterrupt:
            logging.info('SIGINT received, exiting...')
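            # 130 = 128 + SIGINT, the conventional exit code for Ctrl+C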
            sys.exit(130)
        except RuntimeError as err:
            logging.error(err)
            sys.exit(1)
        finally:
            # attempt to preserve data if something went wrong (or not)
            logging.debug('Committing LMDB transaction...')
            txn.commit()
            meta.write_end_time()
Example 4
def main():
    cli.setup_logging()
    parser = argparse.ArgumentParser(
        description='attempt to reproduce original diffs from JSON report')
    cli.add_arg_envdir(parser)
    cli.add_arg_config(parser)
    cli.add_arg_datafile(parser)
    parser.add_argument('--sequential', action='store_true', default=False,
                        help='send one query at a time (slower, but more reliable)')

    args = parser.parse_args()
    sendrecv.module_init(args)
    datafile = cli.get_datafile(args)
    report = DiffReport.from_json(datafile)
    restart_scripts = repro.get_restart_scripts(args.cfg)
    servers = args.cfg['servers']['names']
    dnsreplies_factory = DNSRepliesFactory(servers)

    if args.sequential:
        nproc = 1
    else:
        nproc = args.cfg['sendrecv']['jobs']

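    # first reproduction run against this report: start with empty reprodata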
    if report.reprodata is None:
        report.reprodata = ReproData()

    with LMDB(args.envdir, readonly=True) as lmdb:
        lmdb.open_db(LMDB.QUERIES)
        cli.check_metadb_servers_version(lmdb, servers)

        dstream = repro.query_stream_from_disagreements(lmdb, report)
        try:
            repro.reproduce_queries(
                dstream, report, dnsreplies_factory, args.cfg['diff']['criteria'],
                args.cfg['diff']['target'], restart_scripts, nproc)
        finally:
            # make sure data is saved in case of interrupt
            report.export_json(datafile)
Example 5
def main():
    global lmdb

    cli.setup_logging()
    parser = argparse.ArgumentParser(
        description='compute diff from answers stored in LMDB '
        'and write diffs to LMDB')
    cli.add_arg_envdir(parser)
    cli.add_arg_config(parser)
    cli.add_arg_datafile(parser)

    args = parser.parse_args()
    datafile = cli.get_datafile(args, check_exists=False)
    criteria = args.cfg['diff']['criteria']
    target = args.cfg['diff']['target']
    servers = args.cfg['servers']['names']

    with LMDB(args.envdir) as lmdb_:
        # NOTE: To avoid an lmdb.BadRslotError, probably caused by a weird
        # interaction when using multiple transactions / processes, open a
        # separate environment. Also, all DBs have to be opened before
        # MetaDatabase() is used.
        report = prepare_report(lmdb_, servers)
        cli.check_metadb_servers_version(lmdb_, servers)

    with LMDB(args.envdir, fast=True) as lmdb_:
        lmdb = lmdb_
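        # publish the environment through the module-level global declared
        # above, presumably read by compare_lmdb_wrapper() in worker processes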
        lmdb.open_db(LMDB.ANSWERS)
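        # drop=True: any previous diffs database is discarded and recreated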
        lmdb.open_db(LMDB.DIFFS, create=True, drop=True)
        qid_stream = lmdb.key_stream(LMDB.ANSWERS)

        dnsreplies_factory = DNSRepliesFactory(servers)
        compare_func = partial(compare_lmdb_wrapper, criteria, target,
                               dnsreplies_factory)
        with pool.Pool() as p:
            for _ in p.imap_unordered(compare_func, qid_stream, chunksize=10):
                pass
        export_json(datafile, report)
Example 6
def main():
    cli.setup_logging()
    parser = argparse.ArgumentParser(
        description='use dnsviz to categorize domains '
        '(perfect, warnings, errors)')
    cli.add_arg_config(parser)
    cli.add_arg_dnsviz(parser)
    parser.add_argument('input',
                        type=str,
                        help='input file with domains (one qname per line)')
    args = parser.parse_args()

    njobs = args.cfg['sendrecv']['jobs']
    try:
        probe = subprocess.run(
            ['dnsviz', 'probe', '-A', '-R', respdiff.dnsviz.TYPES,
             '-f', args.input, '-t', str(njobs)],
            check=True,
            stdout=subprocess.PIPE)
    except subprocess.CalledProcessError as exc:
        logging.critical("dnsviz probe failed: %s", exc)
        sys.exit(1)
    except FileNotFoundError:
        logging.critical("'dnsviz' tool is not installed!")
        sys.exit(1)

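    # feed the raw probe output to 'dnsviz grok', which writes the
    # categorized results to the output path from cli.add_arg_dnsviz()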
    try:
        subprocess.run(['dnsviz', 'grok', '-o', args.dnsviz],
                       input=probe.stdout,
                       check=True,
                       stdout=subprocess.PIPE)
    except subprocess.CalledProcessError as exc:
        logging.critical("dnsviz grok failed: %s", exc)
        sys.exit(1)
Example 7
def main():
    cli.setup_logging()
    parser = argparse.ArgumentParser(description='compare two diff summaries')
    cli.add_arg_config(parser)
    parser.add_argument('old_datafile',
                        type=str,
                        help='report to compare against')
    parser.add_argument('new_datafile',
                        type=str,
                        help='report to evaluate')
    # TODO remove when we no longer need to read queries from lmdb
    cli.add_arg_envdir(parser)
    cli.add_arg_limit(parser)

    args = parser.parse_args()
    report = DiffReport.from_json(cli.get_datafile(args, key='new_datafile'))
    field_weights = args.cfg['report']['field_weights']
    ref_report = DiffReport.from_json(
        cli.get_datafile(args, key='old_datafile'))

    check_report_summary(report)
    check_report_summary(ref_report)
    check_usable_answers(report, ref_report)

    cli.print_global_stats(report, ref_report)
    cli.print_differences_stats(report, ref_report)

    if report.summary or ref_report.summary:  # when there are any differences to report
        field_counters = report.summary.get_field_counters()
        ref_field_counters = ref_report.summary.get_field_counters()

        # make sure "disappeared" fields show up as well
        for field in ref_field_counters:
            if field not in field_counters:
                field_counters[field] = Counter()

        cli.print_fields_overview(field_counters, len(report.summary),
                                  ref_field_counters)

        for field in field_weights:
            if field in field_counters:
                counter = field_counters[field]
                ref_counter = ref_field_counters.get(field, Counter())

                # make sure "disappeared" mismatches show up as well
                for mismatch in ref_counter:
                    if mismatch not in counter:
                        counter[mismatch] = 0

                cli.print_field_mismatch_stats(field, counter,
                                               len(report.summary),
                                               ref_counter)

        # query details
        with LMDB(args.envdir, readonly=True) as lmdb:
            lmdb.open_db(LMDB.QUERIES)

            queries_all = convert_queries(
                get_query_iterator(lmdb, report.summary.keys()))
            ref_queries_all = convert_queries(
                get_query_iterator(lmdb, ref_report.summary.keys()))

            for field in field_weights:
                if field in field_counters:
                    # ensure "disappeared" mismatches are shown
                    field_mismatches = dict(
                        report.summary.get_field_mismatches(field))
                    ref_field_mismatches = dict(
                        ref_report.summary.get_field_mismatches(field))
                    mismatches = set(field_mismatches.keys())
                    mismatches.update(ref_field_mismatches.keys())

                    for mismatch in mismatches:
                        qids = field_mismatches.get(mismatch, set())
                        queries = convert_queries(
                            get_query_iterator(lmdb, qids))
                        ref_queries = convert_queries(
                            get_query_iterator(
                                lmdb,
                                ref_field_mismatches.get(mismatch, set())))
                        cli.print_mismatch_queries(
                            field, mismatch,
                            get_printable_queries_format(
                                queries, queries_all, ref_queries,
                                ref_queries_all), args.limit)