def list_results(path):
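    """Print a table of the test runs stored under *path*.

    Every subdirectory is inspected via get_test_files(); directories
    without a raw_results file are skipped.  Helpers such as
    get_test_files, yaml_load, get_test_names and load_run_params are
    assumed to be provided by the surrounding module.
    """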
    results = []

    for dname in os.listdir(path):
        try:
            files_cfg = get_test_files(os.path.join(path, dname))

            if not os.path.isfile(files_cfg['raw_results']):
                continue

            mt = os.path.getmtime(files_cfg['raw_results'])
            res_mtime = time.ctime(mt)

            with open(files_cfg['raw_results']) as fd:
                raw_res = yaml_load(fd.read())
            test_names = ",".join(sorted(get_test_names(raw_res)))

            params = load_run_params(files_cfg['run_params_file'])

            comm = params.get('comment')
            results.append((mt, dname, test_names, res_mtime,
                           '-' if comm is None else comm))
        except ValueError:
            # Skip directories whose results cannot be parsed.
            pass

    tab = texttable.Texttable(max_width=200)
    tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER)
    tab.set_cols_align(["l", "l", "l", "l"])
    results.sort()

    for data in results[::-1]:
        tab.add_row(data[1:])

    tab.header(["Name", "Tests", "etime", "Comment"])

    print(tab.draw())
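
# Usage sketch (the path below is hypothetical):
#     list_results('/var/wally/results')
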
def load_data_from_path(test_res_dir):
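    """Load parsed results for one test run directory.

    Returns a mapping {tool type -> [loaded suite results]}, where the
    loader for each tool type comes from TOOL_TYPE_MAPPER (assumed to
    be defined in the surrounding module).
    """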
    files = get_test_files(test_res_dir)
    with open(files["raw_results"]) as fd:
        raw_res = yaml_load(fd.read())
    res = collections.defaultdict(list)

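    # raw_res is expected to be a sequence of (tool_type, test_lists)
    # pairs; each entry in a test list maps a suite name to its data,
    # whose first element is the folder holding that suite's output.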
    for tp, test_lists in raw_res:
        for tests in test_lists:
            for suite_name, suite_data in tests.items():
                result_folder = suite_data[0]
                res[tp].append(TOOL_TYPE_MAPPER[tp].load(suite_name, result_folder))

    return res
def main(argv):
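    """Entry point: dispatch on the selected subcommand.

    'test' runs a full test cycle, 'ls' lists stored results, 'report'
    rebuilds reports from saved data, and 'compare' prints a console
    diff of two result sets.  Returns 0 on success, 1 on failure.
    """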
    if faulthandler is not None:
        faulthandler.register(signal.SIGUSR1, all_threads=True)

    opts = parse_args(argv)
    stages = []
    report_stages = []

    ctx = Context()
    ctx.results = {}
    ctx.sensors_data = SensorDatastore()

    if opts.subparser_name == 'test':
        cfg = load_config(opts.config_file)
        make_storage_dir_struct(cfg)
        cfg.comment = opts.comment
        save_run_params(cfg)

        with open(cfg.saved_config_file, 'w') as fd:
            fd.write(pretty_yaml.dumps(cfg.__dict__))

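        # Build the ordered pipeline of test-run stages.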
        stages = [
            run_test.discover_stage
        ]

        stages.extend([
            run_test.reuse_vms_stage,
            log_nodes_statistic_stage,
            run_test.save_nodes_stage,
            run_test.connect_stage])

        if cfg.settings.get('collect_info', True):
            stages.append(run_test.collect_hw_info_stage)

        stages.extend([
            # deploy_sensors_stage,
            run_test.run_tests_stage,
            run_test.store_raw_results_stage,
            # gather_sensors_stage
        ])

        cfg.keep_vm = opts.keep_vm
        cfg.no_tests = opts.no_tests
        cfg.dont_discover_nodes = opts.dont_discover_nodes

        ctx.build_meta['build_id'] = opts.build_id
        ctx.build_meta['build_description'] = opts.build_description
        ctx.build_meta['build_type'] = opts.build_type

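    # 'ls' only prints the stored results table and exits.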
    elif opts.subparser_name == 'ls':
        list_results(opts.result_storage)
        return 0

    elif opts.subparser_name == 'report':
        cfg = load_config(get_test_files(opts.data_dir)['saved_config_file'])
        stages.append(run_test.load_data_from(opts.data_dir))
        opts.no_report = False
        # load build meta

    elif opts.subparser_name == 'compare':
        x = run_test.load_data_from_path(opts.data_path1)
        y = run_test.load_data_from_path(opts.data_path2)
        print(run_test.IOPerfTest.format_diff_for_console(
            [x['io'][0], y['io'][0]]))
        return 0

    if not opts.no_report:
        report_stages.append(run_test.console_report_stage)
        if opts.load_report:
            report_stages.append(run_test.test_load_report_stage)
        report_stages.append(run_test.html_report_stage)

    if opts.log_level is not None:
        str_level = opts.log_level
    else:
        str_level = cfg.settings.get('log_level', 'INFO')

    setup_loggers(getattr(logging, str_level), cfg.log_file)
    logger.info("All info would be stored into " + cfg.results_dir)

    for stage in stages:
        ok = False
        with log_stage(stage):
            stage(cfg, ctx)
            ok = True
        if not ok:
            break

    # sys.exc_info() returns (type, value, traceback); remember any stage
    # failure so cleanup still runs before the result is reported.
    exc_cls, exc, tb = sys.exc_info()
    for stage in ctx.clear_calls_stack[::-1]:
        with log_stage(stage):
            stage(cfg, ctx)

    logger.debug("Start utils.cleanup")
    for clean_func, args, kwargs in utils.iter_clean_func():
        with log_stage(clean_func):
            clean_func(*args, **kwargs)

    if exc is None:
        for report_stage in report_stages:
            with log_stage(report_stage):
                report_stage(cfg, ctx)

    logger.info("All info stored into " + cfg.results_dir)

    if exc is None:
        logger.info("Tests finished successfully")
        return 0
    else:
        logger.error("Tests are failed. See detailed error above")
        return 1
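

# Minimal entry-point sketch, assuming main() takes the argument list
# without the program name and that sys is imported at module level.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))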