#!/usr/bin/env python3

import argparse
import logging
import sys

from util.color_logger import get_logger
from util.datamodel_helper import create_db_session, load_file_in_db, save_file_as_json

logger = get_logger('merge_results')


if __name__ == "__main__":
    # Parse Command-line Arguments
    parser = argparse.ArgumentParser(
        description='Merge benchmark results into one file',
        formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=30, width=120))

    parser.add_argument('infile', metavar='INFILE', nargs="+", type=argparse.FileType('r'),
                        help='files with benchmark results which should be merged')
    parser.add_argument('outfile', metavar='OUTFILE', type=argparse.FileType('x'),
                        help='file where the benchmarks are merged into')
    parser.add_argument('--database', default='sqlite://', type=str,
import functools
import re

import numpy

from util.color_logger import get_logger

logger = get_logger('plot')


def calculate_stat_from_array(array):
    if len(array) == 0:
        return {}

    res_dict = {}

    for type in array[0]:
        tmp_arr = numpy.array([r[type] for r in array])  # TODO: support runs with different types
        tmp_mean = numpy.mean(tmp_arr)

        if tmp_mean == 0:
            logger.error('no correct baseline calculated for "%s"', type)
            continue

        res_dict[type] = {'mean': tmp_mean}

    return res_dict


def preprocess(results, baseline, filter_runtime='.*', filter_benchmark='.*'):
#!/usr/bin/env python3

import argparse
import logging
import sys

from util.bench_results import BenchmarkingResults
from util.color_logger import get_logger

logger = get_logger('stats')


def log_general_stats(results):
    logger.info("number of different benchmarks present: %d", len(results.get_all_benchmark_names()))

    for runtime in sorted(results.get_all_runtimes()):
        logger.info('runtime "%s" has %d benchmarks', runtime,
                    len(list(results.get_all_benchmarks_of_runtime(runtime))))

        missing_benchmarks = results.get_missing_benchmark_names(runtime)
        if missing_benchmarks:
            logger.warning(' missing: %s', missing_benchmarks)


if __name__ == "__main__":
    # Parse Command-line Arguments
    parser = argparse.ArgumentParser(
        description='Evaluate benchmark results',
        formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=30, width=120))
import os
import platform
import re
import subprocess
import sys
import tempfile

from datetime import datetime

from util.color_logger import get_logger
from util.console import query_yes_no

import util.datamodel as dm
from util.datamodel_helper import create_db_session, load_file_in_db, save_file_as_json,\
    get_benchmark_name, get_or_create_config, get_or_create_harness, get_or_create_benchmark

logger = get_logger('bench')

try:
    import psutil
    psutil_imported = True
except ImportError:
    psutil_imported = False
    logger.warning("psutil not installed, will skip system related metadata")


BASE_DIR = os.path.abspath(os.path.join(sys.path[0], '..'))
BENCHMARK_DIR = os.path.join(BASE_DIR, 'benchmarks')
CONFIG_DIR = os.path.join(BASE_DIR, 'configs')
ENV_FILE = os.path.join(CONFIG_DIR, 'env')
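
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original sources): one way the
# psutil_imported flag above could guard optional system metadata.  The helper
# name and the returned keys are assumptions made for this example only.
def _example_system_metadata():
    meta = {'platform': platform.platform()}  # always available via the stdlib
    if psutil_imported:                       # extra details only if psutil could be imported
        meta['cpu_count'] = psutil.cpu_count(logical=True)
        meta['memory_total_bytes'] = psutil.virtual_memory().total
    return meta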
#!/usr/bin/env python3

import argparse
import csv
import logging
import sys
import re

from sqlalchemy.orm import joinedload

from util.color_logger import get_logger

import util.datamodel as dm
from util.datamodel_helper import create_db_session, load_file_in_db

logger = get_logger('extract_gcov')

CSV_BENCHMARK_NAME = 'benchmark name'
CSV_FILE_NAME = 'file name'
CSV_EXECUTED_LOC = 'executed LOC'
CSV_TOTAL_LOC = 'total LOC'


def parse_gcov_output(benchmark_name, output):
    lines = output.splitlines()

    # gcov reports each source file as a group of three lines; the first names
    # the file and the second carries the "Lines executed" summary.
    for i in range(0, len(lines), 3):
        file_matcher = re.match("File '(.*)'", lines[i])
        loc_matcher = re.match("Lines executed:(.*)% of ([0-9]*)", lines[i + 1])

        file_name = file_matcher.group(1)

        if loc_matcher is None:
            assert (lines[i + 1] == 'No executable lines')
import json
import re

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, joinedload
from dateutil.parser import parse as date_parse

import util.datamodel as dm
from util.color_logger import get_logger
from util.auto_extend_list import auto_extend_list

from datetime import timezone

logger = get_logger('datamodel_helper')


def create_db_session(db_engine='sqlite://'):
    logger.debug('create database session. Engine: "{}"'.format(db_engine))

    engine = create_engine(db_engine)
    dm.Base.metadata.bind = engine
    dm.Base.metadata.create_all(engine)

    DBSession = sessionmaker()
    DBSession.bind = engine
    return DBSession()


def get_benchmark_name(benchmark):
    fixture = benchmark.get('fixture')
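
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original sources): create_db_session()
# above returns a SQLAlchemy session bound to the given engine URL and creates the
# schema on first use.  The file-backed 'sqlite:///results.db' URL and the helper
# name are assumptions made for this example only.
def _example_open_results_db():
    session = create_db_session('sqlite:///results.db')
    # ... add util.datamodel objects to the session, then persist them
    session.commit()
    return session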