Example #1
def load_test_timings(fname, max_diff=1000):
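    """Collect 'io' test run intervals from the results file at fname, group
    them by test config name, and merge intervals that lie within max_diff of
    each other. Returns {config_name: [(start, stop), ...]}."""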
    raw_map = collections.defaultdict(list)

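    # plain attribute container; load_data_from() fills in data.results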
    class data(object):
        pass

    load_data_from(fname)(None, data)
    for test_type, test_results in data.results.items():
        if test_type == 'io':
            for tests_res in test_results:
                raw_map[tests_res.config.name].append(tests_res.run_interval)

    result = {}
    for name, intervals in raw_map.items():
        intervals.sort()
        curr_start, curr_stop = intervals[0]
        curr_result = []

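        # first pass: runs that start within max_diff of the current interval
        # are treated as the same run; otherwise close the current interval
        # and start a new one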
        for (start, stop) in intervals[1:]:
            if abs(curr_start - start) < max_diff:
                # if abs(curr_stop - stop) > 2:
                #     print abs(curr_stop - stop)
                assert abs(curr_stop - stop) < max_diff
            else:
                assert start + max_diff >= curr_stop
                assert stop > curr_stop
                curr_result.append((curr_start, curr_stop))
                curr_start, curr_stop = start, stop
        curr_result.append((curr_start, curr_stop))

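        # second pass: merge adjacent intervals separated by less than max_diff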
        merged_res = []
        curr_start, curr_stop = curr_result[0]
        for start, stop in curr_result[1:]:
            if abs(curr_stop - start) < max_diff:
                curr_stop = stop
            else:
                merged_res.append((curr_start, curr_stop))
                curr_start, curr_stop = start, stop
        merged_res.append((curr_start, curr_stop))
        result[name] = merged_res

    return result
Example #3
def main(argv):
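    """Entry point: parse the command line, build the stage pipeline for the
    selected subcommand, run the stages, then perform cleanup and reporting.
    Returns 0 on success, 1 if any stage failed."""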
    if faulthandler is not None:
        faulthandler.register(signal.SIGUSR1, all_threads=True)

    opts = parse_args(argv)
    stages = []
    report_stages = []

    ctx = Context()
    ctx.results = {}
    ctx.sensors_data = SensorDatastore()

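    # build the stage pipeline according to the selected subcommand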
    if opts.subparser_name == 'test':
        cfg = load_config(opts.config_file)
        make_storage_dir_struct(cfg)
        cfg.comment = opts.comment
        save_run_params(cfg)

        with open(cfg.saved_config_file, 'w') as fd:
            fd.write(pretty_yaml.dumps(cfg.__dict__))

        stages = [
            run_test.discover_stage
        ]

        stages.extend([
            run_test.reuse_vms_stage,
            log_nodes_statistic_stage,
            run_test.save_nodes_stage,
            run_test.connect_stage])

        if cfg.settings.get('collect_info', True):
            stages.append(run_test.collect_hw_info_stage)

        stages.extend([
            # deploy_sensors_stage,
            run_test.run_tests_stage,
            run_test.store_raw_results_stage,
            # gather_sensors_stage
        ])

        cfg.keep_vm = opts.keep_vm
        cfg.no_tests = opts.no_tests
        cfg.dont_discover_nodes = opts.dont_discover_nodes

        ctx.build_meta['build_id'] = opts.build_id
        ctx.build_meta['build_descrption'] = opts.build_description
        ctx.build_meta['build_type'] = opts.build_type

    elif opts.subparser_name == 'ls':
        list_results(opts.result_storage)
        return 0

    elif opts.subparser_name == 'report':
        cfg = load_config(get_test_files(opts.data_dir)['saved_config_file'])
        stages.append(run_test.load_data_from(opts.data_dir))
        opts.no_report = False
        # load build meta

    elif opts.subparser_name == 'compare':
        x = run_test.load_data_from_path(opts.data_path1)
        y = run_test.load_data_from_path(opts.data_path2)
        print(run_test.IOPerfTest.format_diff_for_console(
            [x['io'][0], y['io'][0]]))
        return 0

    if not opts.no_report:
        report_stages.append(run_test.console_report_stage)
        if opts.load_report:
            report_stages.append(run_test.test_load_report_stage)
        report_stages.append(run_test.html_report_stage)

    if opts.log_level is not None:
        str_level = opts.log_level
    else:
        str_level = cfg.settings.get('log_level', 'INFO')

    setup_loggers(getattr(logging, str_level), cfg.log_file)
    logger.info("All info would be stored into " + cfg.results_dir)

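    # run the stages in order; log_stage is assumed to log and suppress any
    # exception, so a failed stage leaves ok False and stops the loop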
    for stage in stages:
        ok = False
        with log_stage(stage):
            stage(cfg, ctx)
            ok = True
        if not ok:
            break

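    # sys.exc_info() is used to detect whether a stage raised; cleanup stages
    # registered in ctx.clear_calls_stack then run in reverse order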
    exc, cls, tb = sys.exc_info()
    for stage in ctx.clear_calls_stack[::-1]:
        with log_stage(stage):
            stage(cfg, ctx)

    logger.debug("Start utils.cleanup")
    for clean_func, args, kwargs in utils.iter_clean_func():
        with log_stage(clean_func):
            clean_func(*args, **kwargs)

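    # generate reports only if every stage completed without an exception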
    if exc is None:
        for report_stage in report_stages:
            with log_stage(report_stage):
                report_stage(cfg, ctx)

    logger.info("All info stored into " + cfg.results_dir)

    if exc is None:
        logger.info("Tests finished successfully")
        return 0
    else:
        logger.error("Tests are failed. See detailed error above")
        return 1
Example #4
def main(argv):
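    """Entry point: parse the command line, build the stage pipeline for the
    selected subcommand, run the stages, then perform cleanup and reporting.
    Returns 0 on success, 1 if any stage failed."""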
    if faulthandler is not None:
        faulthandler.register(signal.SIGUSR1, all_threads=True)

    opts = parse_args(argv)
    stages = []
    report_stages = []

    ctx = Context()
    ctx.results = {}
    ctx.sensors_data = SensorDatastore()

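    # build the stage pipeline according to the selected subcommand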
    if opts.subparser_name == 'test':
        cfg = load_config(opts.config_file)
        make_storage_dir_struct(cfg)
        cfg.comment = opts.comment
        save_run_params(cfg)

        with open(cfg.saved_config_file, 'w') as fd:
            fd.write(pretty_yaml.dumps(cfg.__dict__))

        stages = [run_test.discover_stage]

        stages.extend([
            run_test.reuse_vms_stage, log_nodes_statistic_stage,
            run_test.save_nodes_stage, run_test.connect_stage
        ])

        if cfg.settings.get('collect_info', True):
            stages.append(run_test.collect_hw_info_stage)

        stages.extend([
            # deploy_sensors_stage,
            run_test.run_tests_stage,
            run_test.store_raw_results_stage,
            # gather_sensors_stage
        ])

        cfg.keep_vm = opts.keep_vm
        cfg.no_tests = opts.no_tests
        cfg.dont_discover_nodes = opts.dont_discover_nodes

        ctx.build_meta['build_id'] = opts.build_id
        ctx.build_meta['build_descrption'] = opts.build_description
        ctx.build_meta['build_type'] = opts.build_type

    elif opts.subparser_name == 'ls':
        list_results(opts.result_storage)
        return 0

    elif opts.subparser_name == 'report':
        cfg = load_config(get_test_files(opts.data_dir)['saved_config_file'])
        stages.append(run_test.load_data_from(opts.data_dir))
        opts.no_report = False
        # load build meta

    elif opts.subparser_name == 'compare':
        x = run_test.load_data_from_path(opts.data_path1)
        y = run_test.load_data_from_path(opts.data_path2)
        print(
            run_test.IOPerfTest.format_diff_for_console(
                [x['io'][0], y['io'][0]]))
        return 0

    if not opts.no_report:
        report_stages.append(run_test.console_report_stage)
        if opts.load_report:
            report_stages.append(run_test.test_load_report_stage)
        report_stages.append(run_test.html_report_stage)

    if opts.log_level is not None:
        str_level = opts.log_level
    else:
        str_level = cfg.settings.get('log_level', 'INFO')

    setup_loggers(getattr(logging, str_level), cfg.log_file)
    logger.info("All info would be stored into " + cfg.results_dir)

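    # run the stages in order; log_stage is assumed to log and suppress any
    # exception, so a failed stage leaves ok False and stops the loop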
    for stage in stages:
        ok = False
        with log_stage(stage):
            stage(cfg, ctx)
            ok = True
        if not ok:
            break

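    # sys.exc_info() is used to detect whether a stage raised; cleanup stages
    # registered in ctx.clear_calls_stack then run in reverse order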
    exc, cls, tb = sys.exc_info()
    for stage in ctx.clear_calls_stack[::-1]:
        with log_stage(stage):
            stage(cfg, ctx)

    logger.debug("Start utils.cleanup")
    for clean_func, args, kwargs in utils.iter_clean_func():
        with log_stage(clean_func):
            clean_func(*args, **kwargs)

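    # generate reports only if every stage completed without an exception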
    if exc is None:
        for report_stage in report_stages:
            with log_stage(report_stage):
                report_stage(cfg, ctx)

    logger.info("All info stored into " + cfg.results_dir)

    if exc is None:
        logger.info("Tests finished successfully")
        return 0
    else:
        logger.error("Tests are failed. See detailed error above")
        return 1