Example #1
def main():
    setup_loggers()
    parser = argparse.ArgumentParser(
        description='query and compare CBT test results')
    parser.add_argument(
        '-a',
        '--archive',
        required=True,
        help='Directory where the results to be compared are archived.')
    parser.add_argument(
        '-b',
        '--baseline',
        required=True,
        help='Directory where the baseline results are archived.')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='be chatty')
    ctx = parser.parse_args(sys.argv[1:])
    # settings.initialize() expects ctx.config_file and ctx.conf
    ctx.config_file = os.path.join(ctx.archive, 'results', 'cbt_config.yaml')
    ctx.conf = None
    settings.initialize(ctx)

    results = []
    for iteration in range(settings.cluster.get('iterations', 0)):
        cluster = Ceph(settings.cluster)
        benchmarks = list(
            zip(benchmarkfactory.get_all(ctx.archive, cluster, iteration),
                benchmarkfactory.get_all(ctx.baseline, cluster, iteration)))
        for current, baseline in benchmarks:
            if not current.exists(True):
                logger.error("tested: %s result does not exist in %s", current,
                             ctx.archive)
                break
            if not baseline.exists(True):
                logger.error("baseline: %s result does not exist in %s",
                             baseline, ctx.baseline)
                break
            results.extend(current.evaluate(baseline))

    accepted = sum(result.accepted for result in results)
    if ctx.verbose:
        for result in results:
            if result.accepted:
                logger.info(result)
            else:
                logger.warning(result)

    rejected = len(results) - accepted
    if rejected > 0:
        logger.warning("%d tests failed out of %d", rejected, len(results))
        sys.exit(1)
    else:
        logger.info("All %d tests passed.", len(results))
Example #2
def main(argv):
    setup_loggers()
    ctx = parse_args(argv)
    settings.initialize(ctx)

    iteration = 0
    logger.debug("Settings.cluster:\n    %s",
                 pprint.pformat(settings.cluster).replace("\n", "\n    "))

    global_init = collections.OrderedDict()

    # FIXME: Create ClusterFactory and parametrically match benchmarks and clusters.
    cluster = Ceph(settings.cluster)

    # E_OK
    return_code = 0

    try:
        for iteration in range(settings.cluster.get("iterations", 0)):
            archive_dir = settings.cluster.get('archive_dir')
            benchmarks = benchmarkfactory.get_all(archive_dir, cluster,
                                                  iteration)
            for b in benchmarks:
                if b.exists():
                    continue

                # Tell the benchmark to initialize unless it's in the skip list.
                if b.getclass() not in global_init:
                    b.initialize()

                    # Skip future initializations unless rebuild requested.
                    if not settings.cluster.get('rebuild_every_test', False):
                        global_init[b.getclass()] = b

                # Always try to initialize endpoints.
                b.initialize_endpoints()

                try:
                    b.run()
                finally:
                    if b.getclass() not in global_init:
                        b.cleanup()
    except Exception:
        return_code = 1  # FAIL
        logger.exception("During tests")
    finally:
        for k, b in list(global_init.items()):
            try:
                b.cleanup()
            except Exception:
                logger.exception("During %s cleanup", k)
                return_code = 1  # FAIL

    return return_code
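Examples #2 and #3 call a parse_args() helper that is not shown. A minimal sketch consistent with the fragment visible in Example #7 below; only --conf and the config_file positional appear in the source, and the --archive flag is an assumption inferred from settings.cluster.get('archive_dir'):

import argparse


def parse_args(argv):
    parser = argparse.ArgumentParser(description='Benchmark a Ceph cluster.')
    parser.add_argument(
        '--archive',
        required=False,
        help='Directory where the results should be archived.',  # assumed flag
    )
    parser.add_argument(
        '--conf',
        required=False,
        help='The ceph.conf file to use.',
    )
    parser.add_argument(
        'config_file',
        help='YAML config file.',
    )
    return parser.parse_args(argv)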
Example #3
File: cbt.py Project: sirspock/cbt
def main(argv):
    setup_loggers()
    ctx = parse_args(argv)
    settings.initialize(ctx)

    iteration = 0
    logger.debug("Settings.cluster:\n    %s", pprint.pformat(settings.cluster).replace("\n", "\n    "))

    global_init = collections.OrderedDict()

    # FIXME: Create ClusterFactory and parametrically match benchmarks and clusters.
    cluster = Ceph(settings.cluster)

    # E_OK
    return_code = 0

    try:
        for iteration in range(settings.cluster.get("iterations", 0)):
            benchmarks = benchmarkfactory.get_all(cluster, iteration)
            for b in benchmarks:
                if b.exists():
                    continue

                # Tell the benchmark to initialize unless it's in the skip list.
                if b.getclass() not in global_init:
                    b.initialize()

                    # Skip future initializations unless rebuild requested.
                    if not settings.cluster.get("rebuild_every_test", False):
                        global_init[b.getclass()] = b

                try:
                    b.run()
                finally:
                    if b.getclass() not in global_init:
                        b.cleanup()
    except Exception:
        return_code = 1  # FAIL
        logger.exception("During tests")
    finally:
        for k, b in global_init.items():
            try:
                b.cleanup()
            except Exception:
                logger.exception("During %s cleanup", k)
                return_code = 1  # FAIL

    return return_code
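Example #3 is an older variant of Example #2. The visible differences: here benchmarkfactory.get_all() takes only (cluster, iteration) and resolves the archive directory itself, whereas Example #2 passes settings.cluster.get('archive_dir') explicitly and additionally calls b.initialize_endpoints() before every run.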
Example #4
def main(argv):
    setup_loggers()
    ctx = parse_args(argv)
    settings.initialize(ctx)

    logger.debug("Settings.general:\n    %s",
                 pprint.pformat(settings.general).replace("\n", "\n    "))

    pool = Pool()
    pool.print_counts()

    for i in range(5, 20):
        print("")
        print("setting osds to: %s" % i)
        pool.set_osds(i)
        pool.print_counts()
Example #5
def main(argv):
    setup_loggers()
    ctx = parse_args(argv)
    settings.initialize(ctx)

    logger.debug("Settings.general:\n    %s",
                 pprint.pformat(settings.general).replace("\n", "\n    "))

    pool = Pool()
    pool.print_counts()

    for i in range(9, 10):
        print("")
        print("setting osds to: %s" % i)
        pool.set_osds(i)
        pool.print_counts()
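Examples #4 and #5 drive a Pool helper whose definition is not shown. A purely hypothetical stand-in that makes the loops runnable without a live cluster (the real class presumably queries and resizes an actual Ceph pool):

class Pool(object):
    # Hypothetical stub: tracks an OSD count instead of touching a cluster.
    def __init__(self):
        self.osds = 0

    def set_osds(self, count):
        # The real implementation would resize or reweight OSDs here.
        self.osds = count

    def print_counts(self):
        print("osds: %d" % self.osds)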
Example #6
def main():
    setup_loggers()
    parser = argparse.ArgumentParser(
        description='query and compare CBT test results')
    parser.add_argument(
        '-a',
        '--archive',
        required=True,
        help='Directory where the results to be compared are archived.')
    parser.add_argument(
        '-b',
        '--baseline',
        required=True,
        help='Directory where the baseline results are archived.')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='be chatty')
    parser.add_argument('--output',
                        help='write result in markdown to specified file',
                        type=argparse.FileType('w'))
    ctx = parser.parse_args(sys.argv[1:])
    # settings.initialize() expects ctx.config_file and ctx.conf
    ctx.config_file = os.path.join(ctx.archive, 'results', 'cbt_config.yaml')
    ctx.conf = None
    settings.initialize(ctx)

    results = []
    for iteration in range(settings.cluster.get('iterations', 0)):
        cluster = Ceph(settings.cluster)
        benchmarks = list(
            zip(benchmarkfactory.get_all(ctx.archive, cluster, iteration),
                benchmarkfactory.get_all(ctx.baseline, cluster, iteration)))
        for current, baseline in benchmarks:
            if not current.exists(True):
                logger.error("tested: %s result does not exist in %s", current,
                             ctx.archive)
                break
            if not baseline.exists(True):
                logger.error("baseline: %s result does not exist in %s",
                             baseline, ctx.baseline)
                break
            results.extend(current.evaluate(baseline))

    nr_accepted = sum(result.accepted for result in results)
    if ctx.verbose:
        for result in results:
            if result.accepted:
                logger.info(result)
            else:
                logger.warning(result)

    nr_tests = len(results)
    nr_rejected = nr_tests - nr_accepted

    if ctx.output:
        heading = None
        if nr_rejected:
            heading = Heading3(f'{nr_rejected} out of {nr_tests} failed')
        else:
            heading = Heading3(f'all {nr_tests} tests passed')
        ctx.output.write(str(heading))

        table = Table()
        table.add_headers('run', 'metric', 'baseline', 'result', 'accepted')
        for r in results:
            table.add_cells(r.run, r.alias, r.baseline, r.result,
                            '  ' if r.accepted else ':x:')
        ctx.output.write(str(table))

    if nr_rejected > 0:
        logger.warning("%d tests failed out of %d", nr_rejected, len(results))
        sys.exit(1)
    else:
        logger.info("All %d tests passed.", len(results))
Example #7
    parser.add_argument(
        '--conf',
        required=False,
        help='The ceph.conf file to use.',
    )
    parser.add_argument(
        'config_file',
        help='YAML config file.',
    )
    args = parser.parse_args()
    return args


if __name__ == '__main__':
    setup_loggers()
    ctx = parse_args()
    settings.initialize(ctx)

    iteration = 0
    logger.debug("Settings.cluster: %s", settings.cluster)
    global_init = {}
    # FIXME: Create ClusterFactory and parametrically match benchmarks and clusters.
    cluster = Ceph(settings.cluster)
    while iteration < settings.cluster.get("iterations", 0):
        benchmarks = benchmarkfactory.getAll(cluster, iteration)
        for b in benchmarks:
            if b.exists():
                continue
            # Tell the benchmark to initialize unless it's in the skip list.
            if b.getclass() not in global_init: