def main():
    """Entry point: compute speedups of one or more Yuck integration test
    runs relative to a reference run, print a JSON summary, and optionally
    plot diagrams."""
    argParser = argparse.ArgumentParser(
        description='Computes speedups for a given set of Yuck integration test runs',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    argParser.add_argument(
        '--db', '--database', dest='database', default='results.db',
        help='Define results database')
    argParser.add_argument(
        '-p', '--plot', dest='plotDiagrams', action='store_true',
        help='Plot diagrams')
    argParser.add_argument(
        '--min-runtime', dest='minRuntime', type=int, default=1,
        help='Ignore quicker runs')
    argParser.add_argument('referenceRun', metavar='reference-run')
    argParser.add_argument('runs', metavar='run', nargs='+')
    args = argParser.parse_args()
    # Open the results database read-only via a file URI.
    databaseUri = 'file:{}?mode=ro'.format(pathname2url(args.database))
    with sqlite3.connect(databaseUri, uri=True) as conn:
        cursor = conn.cursor()
        results = computeSpeedups(cursor, args)
        if results:
            postprocessedResults = {
                run: common.analyzeResult(data)
                for run, data in results.items()
            }
            print(json.dumps(postprocessedResults, sort_keys=True, indent=4))
            if args.plotDiagrams:
                plotDiagrams(args, results)
def main():
    """Entry point: compute area ratios of one or more Yuck integration test
    runs relative to a reference run, print a JSON summary, and optionally
    plot diagrams."""
    parser = argparse.ArgumentParser(
        description='Computes area ratios for a given set of Yuck integration test runs',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--db', '--database', dest='database', default='results.db',
        help='Define results database')
    parser.add_argument(
        '-p', '--plot', dest='plotDiagrams', action='store_true',
        help='Plot diagrams')
    parser.add_argument(
        '--problem-filter', dest='problemFilter', default='',
        help='Consider only problems that match the given regexp')
    parser.add_argument(
        '--instance-filter', dest='instanceFilter', default='',
        help='Consider only instances that match the given regexp')
    parser.add_argument(
        '--min-runtime', dest='minRuntime', type=int, default=1,
        help='Ignore quicker runs')
    parser.add_argument(
        '--runtime-tolerance', dest='runtimeTolerance', type=float,
        default=0.05,
        help=
        'Ignore result of run when it was considerably quicker or slower than the reference run (applies to maximization only)'
    )
    parser.add_argument('referenceRun', metavar='reference-run')
    parser.add_argument('runs', metavar='run', nargs='+')
    args = parser.parse_args()
    # Open the results database read-only via a file URI.
    dburi = 'file:{}?mode=ro'.format(pathname2url(args.database))
    with sqlite3.connect(dburi, uri=True) as conn:
        cursor = conn.cursor()
        results = computeAreaRatios(cursor, args)
        if results:
            for run in results:
                if not results[run]:
                    # Bug fix: file=sys.stderr was previously passed to
                    # str.format (where unused keyword arguments are
                    # silently ignored) instead of to print, so the
                    # warning went to stdout.
                    print('Warning: No data for run {}'.format(run),
                          file=sys.stderr)
            postprocessedResults = {
                run: common.analyzeResult(results[run])
                for run in results
            }
            print(json.dumps(postprocessedResults, sort_keys=True, indent=4))
            if args.plotDiagrams:
                plotDiagrams(args, results)
def postprocessResult(result):
    """Summarize the per-task optimality gaps of a run.

    Returns a dict with the statistics produced by common.analyzeResult
    over the tasks that report a 'gap' (using the range [0, max gap]),
    plus a 'failures' entry counting unsolved tasks.
    """
    gapByTask = {
        task: data['gap']
        for task, data in result.items() if 'gap' in data
    }
    analysis = (
        common.analyzeResult(gapByTask, range=(0, max(gapByTask.values())))
        if gapByTask
        else {})
    analysis['failures'] = sum(
        1 for data in result.values() if not data['solved'])
    return analysis
def postprocessResult(result):
    """Summarize the per-task penalties of a run.

    Returns a dict with the statistics produced by common.analyzeResult
    over the tasks that report a 'penalty' (using the range
    [0, max penalty]), plus a 'failures' entry counting unsolved tasks.
    """
    penaltyByTask = {
        task: data['penalty']
        for task, data in result.items() if 'penalty' in data
    }
    analysis = (
        common.analyzeResult(
            penaltyByTask, range=(0, max(penaltyByTask.values())))
        if penaltyByTask
        else {})
    analysis['failures'] = sum(
        1 for data in result.values() if not data['solved'])
    return analysis