def parse_sysbench_output():
    """Read full sysbench (OLTP) output from stdin, extract the known
    metrics via the module's ``search`` helper, and publish each one as
    ``results.<key>`` through the Benchmark API.

    The composite score is the approximate 95th-percentile latency
    (lower is better).
    """
    text = '\n'.join(sys.stdin.readlines())

    # Mapping of published key -> label searched for in the output.
    fields = (
        ('read', 'read'),
        ('write', 'write'),
        ('other', 'other'),
        ('total', 'total'),
        ('transactions', 'transactions'),
        ('deadlocks', 'deadlocks'),
        ('rw.requests', 'read/write requests'),
        ('other.operations', 'other operations'),
        ('total.time', 'total time'),
        ('events', 'total number of events'),
        ('event.time', 'total time taken by event execution'),
        ('min', 'min'),
        ('avg', 'avg'),
        ('max', 'max'),
        ('95th', 'approx. 95 percentile'),
        ('avg.events', 'events (avg/stddev)'),
        ('avg.time', 'execution time (avg/stddev)'),
    )
    results = dict((key, search(text, label)) for key, label in fields)

    for key, value in results.items():
        Benchmark.set_data({"results.%s" % key: value})
    Benchmark.set_composite_score(results['95th'], '', 'asc')
def main():
    """Entry point: read a results file named on the command line and
    store its entire contents under the ``meta.raw`` key."""
    parser = argparse.ArgumentParser(
        description='Set the raw results of a benchmark run.')
    parser.add_argument(
        "value",
        metavar='value',
        type=argparse.FileType('r'),
        help='The raw results of a benchmark run.')
    parsed = parser.parse_args()
    raw_text = parsed.value.read()
    Benchmark.set_data({'meta.raw': raw_text})
def main():
    """Record the raw output of a benchmark run under ``meta.raw``.

    Takes a single positional argument naming a readable file; its
    contents are stored verbatim via the Benchmark API.
    """
    arg_parser = argparse.ArgumentParser(
        description='Set the raw results of a benchmark run.')
    arg_parser.add_argument(
        "value", metavar='value',
        help='The raw results of a benchmark run.',
        type=argparse.FileType('r'))
    opts = arg_parser.parse_args()
    Benchmark.set_data({'meta.raw': opts.value.read()})
def parse_dbench_output():
    """Parse dbench results from stdin and publish them.

    Input is one ``<key>{perf}=<value>`` pair per line; each pair is
    stored as ``results.<key>``. The ``throughput`` value becomes the
    composite score (a KeyError here means dbench produced no
    throughput line, which is fail-fast by design).
    """
    text = '\n'.join(sys.stdin.readlines())
    results = {}
    for line in re.split(r'[\n\r]+', text):
        # partition() always yields (head, sep, tail); a line without
        # the '{perf}=' marker ends up as (line, '', '').
        key, _, value = line.partition("{perf}=")
        if key:
            results[key] = value
    for key, value in results.items():
        Benchmark.set_data({"results.%s" % key: value})
    Benchmark.set_composite_score(results['throughput'], 'throughput')
def parse_sysbench_output():
    """Extract the timing summary from sysbench output on stdin and
    publish each metric as ``results.<key>``.

    The 95th-percentile latency is used as the composite score with
    ascending direction (lower is better).
    """
    text = '\n'.join(sys.stdin.readlines())

    # Published key -> label handed to the shared ``search`` helper.
    fields = (
        ('time', 'total time'),
        ('events', 'total number of events'),
        ('event.time', 'total time taken by event execution'),
        ('min', 'min'),
        ('avg', 'avg'),
        ('max', 'max'),
        ('95th', 'approx. 95 percentile'),
        ('avg.events', 'events (avg/stddev)'),
        ('avg.time', 'execution time (avg/stddev)'),
    )
    results = dict((key, search(text, label)) for key, label in fields)

    for key, value in results.items():
        Benchmark.set_data({"results.%s" % key: value})
    Benchmark.set_composite_score(results['95th'], '95th %', 'asc')
def main():
    """Store a composite benchmark result.

    Positional arguments: the result key and value (required), plus
    optional units (e.g. requests/sec) and direction ('asc' = lower is
    better, 'desc' = higher is better). Each piece is written under
    ``results.<key>.{value,units,direction}`` via Benchmark.set_data;
    units/direction are only written when supplied.
    """
    parser = argparse.ArgumentParser(
        description='Set the composite result of a benchmark run.')
    parser.add_argument("key", metavar='key',
                        help='The key of the data to store, i.e., .')
    # BUG FIX: the help text for "value" was a copy/paste of the "key"
    # help ("The key of the data to store").
    parser.add_argument("value", metavar='value',
                        help='The value of the data to store, i.e., .')
    parser.add_argument("units", metavar='units', nargs='?',
                        help='''
    The type of units used to measure the composite, i.e., requests/sec.
    ''')
    parser.add_argument("direction", metavar='direction', nargs='?',
                        help='''
    The direction of how the composite should be interpreted. 'asc' if a lower number is better; 'desc' if a higher number is better.
    ''')
    args = parser.parse_args()
    Benchmark.set_data({'results.%s.value' % args.key: args.value})
    if args.units:
        Benchmark.set_data({'results.%s.units' % args.key: args.units})
    if args.direction:
        Benchmark.set_data({'results.%s.direction' % args.key: args.direction})
def parse_sysbench_output():
    """Parse a sysbench summary from stdin with explicit regexes and
    publish each captured metric as ``results.<key>``.

    The required fields (time, events, execution, min/avg/max, 95th)
    fail fast with AttributeError if absent, matching the original
    behaviour; the two stddev fields are optional.
    """
    output = '\n'.join(sys.stdin.readlines())
    results = {}
    m = re.search(r'total time:\s+(\d+\.\d+s)', output)
    results['time'] = m.group(1)
    m = re.search(r'total number of events:\s+(\d+)', output)
    results['events'] = m.group(1)
    m = re.search(r'total time taken by event execution:\s+(\d+\.\d+)', output)
    results['execution'] = m.group(1)
    m = re.search(r'min:\s+(\d+\.\d+ms)', output)
    results['min'] = m.group(1)
    m = re.search(r'avg:\s+(\d+\.\d+ms)', output)
    results['avg'] = m.group(1)
    m = re.search(r'max:\s+(\d+\.\d+ms)', output)
    results['max'] = m.group(1)
    # BUG FIX: the literal dot in "approx." was unescaped and could
    # match any character.
    m = re.search(r'approx\.\s+95 percentile:\s+(\d+\.\d+ms)', output)
    results['95th'] = m.group(1)
    # BUG FIX: the pattern read "):s+" (missing backslash, so it looked
    # for a literal "s"), and "$" without re.M only matches at the very
    # end of the input, so this field was effectively never captured.
    m = re.search(r'events \(avg/stddev\):\s+(.*?)$', output, re.M)
    if m:
        results['events_stddev'] = m.group(1)
    # BUG FIX: "\d+\.\d/" allowed only a single digit after the decimal
    # point, so typical sysbench values (e.g. 9.9837/0.00) never matched.
    m = re.search(r'execution time \(avg/stddev\):\s+(\d+\.\d+/\d+\.\d+)',
                  output)
    if m:
        results['time_stddev'] = m.group(1)
    for key in results:
        Benchmark.set_data({"results.%s" % key: results[key]})
    Benchmark.set_composite_score(results['95th'], '95th %', 'asc')
def main():
    """Store a composite benchmark result from the command line.

    Required positionals: result key and value. Optional positionals:
    units (e.g. requests/sec) and direction ('asc' = lower is better,
    'desc' = higher is better). Values are written under
    ``results.<key>.{value,units,direction}``; the optional pieces are
    stored only when provided.
    """
    parser = argparse.ArgumentParser(
        description='Set the composite result of a benchmark run.'
    )
    parser.add_argument(
        "key", metavar='key',
        help='The key of the data to store, i.e., .'
    )
    # BUG FIX: the help text for "value" duplicated the "key" help
    # ("The key of the data to store") by copy/paste.
    parser.add_argument(
        "value", metavar='value',
        help='The value of the data to store, i.e., .'
    )
    parser.add_argument(
        "units", metavar='units', nargs='?',
        help=''' The type of units used to measure the composite, i.e., requests/sec. '''
    )
    parser.add_argument(
        "direction", metavar='direction', nargs='?',
        help=''' The direction of how the composite should be interpreted. 'asc' if a lower number is better; 'desc' if a higher number is better. '''
    )
    args = parser.parse_args()
    Benchmark.set_data({'results.%s.value' % args.key: args.value})
    if args.units:
        Benchmark.set_data({'results.%s.units' % args.key: args.units})
    if args.direction:
        Benchmark.set_data({'results.%s.direction' % args.key: args.direction})
from charmbenchmark import Benchmark

# Script: publish Rally-style JSON results (path given as argv[1]).
# NOTE(review): sys.argv[1] raises IndexError before the falsy check can
# run when no argument is supplied — confirm callers always pass a path.
if not sys.argv[1] or not os.path.exists(sys.argv[1]):
    sys.exit(1)

with open(sys.argv[1]) as f:
    results = json.loads(f.read())

# We only handle 1 scenario ATM
result = results[0]

b = Benchmark()
# Publish the overall and load durations; both measured in seconds and
# interpreted as "lower is better" ('asc').
b.set_data({'results.full-duration.value': result['full_duration']})
b.set_data({'results.full-duration.units': 'seconds'})
b.set_data({'results.full-duration.direction': 'asc'})
b.set_data({'results.load-duration.value': result['load_duration']})
b.set_data({'results.load-duration.units': 'seconds'})
b.set_data({'results.load-duration.direction': 'asc'})

# Accumulate totals per atomic action; 'average' sums each iteration's
# overall duration. NOTE(review): dict.iteritems() is Python 2 only —
# confirm the target interpreter. The accumulation appears to continue
# beyond this chunk (actions are initialised to 0 but never added to
# here, and 'total' is unused so far) — the remainder presumably divides
# by 'total' and publishes per-action averages; verify in the full file.
actions = {'average': 0}
total = len(result['result'])
for r in result['result']:
    actions['average'] += r['duration']
    for a, v in r['atomic_actions'].iteritems():
        if a not in actions:
            actions[a] = 0
def test_set_data(self, action_set):
    """Benchmark.set_data should forward its payload to the patched
    action_set exactly once and return its True result."""
    action_set.return_value = True
    payload = {'key': 'value'}
    outcome = Benchmark.set_data(payload)
    self.assertTrue(outcome)
    action_set.assert_called_once_with(payload)