def parse_sysbench_output():
    output = '\n'.join(sys.stdin.readlines())

    results = {}

    results['read'] = search(output, 'read')
    results['write'] = search(output, 'write')
    results['other'] = search(output, 'other')
    results['total'] = search(output, 'total')
    results['transactions'] = search(output, 'transactions')
    results['deadlocks'] = search(output, 'deadlocks')
    results['rw.requests'] = search(output, 'read/write requests')
    results['other.operations'] = search(output, 'other operations')
    results['total.time'] = search(output, 'total time')
    results['events'] = search(output, 'total number of events')
    results['event.time'] = search(output, 'total time taken by event execution')
    results['min'] = search(output, 'min')
    results['avg'] = search(output, 'avg')
    results['max'] = search(output, 'max')
    results['95th'] = search(output, 'approx.  95 percentile')
    results['avg.events'] = search(output, 'events (avg/stddev)')
    results['avg.time'] = search(output, 'execution time (avg/stddev)')

    for key in results:
        Benchmark.set_data({"results.%s" % key: results[key]})

    Benchmark.set_composite_score(results['95th'], '', 'asc')
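The snippet above relies on a `search` helper that this page does not show. A minimal sketch of what it plausibly does, assuming sysbench's "label: value" output format (the name is real, the body is inferred):

import re

def search(output, label):
    # Inferred helper: return the text that follows "<label>:" on its line,
    # or None if the label does not appear in the output.
    m = re.search(r'%s:\s*(.+)' % re.escape(label), output)
    return m.group(1).strip() if m else None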
    def test_benchmark_meta(self, set_meta, set_data):
        key = 'foo'
        value = 'bar'
        units = 'bogomips'
        direction = 'desc'

        # Test with only a key/value pair
        Benchmark.set_meta(key, value)
        set_meta.assert_called_once_with(key, value)

        set_meta.reset_mock()

        # Test with all parameters
        Benchmark.set_meta(key, value, units, direction)
        set_meta.assert_called_once_with(key, value, units, direction)
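The assertions above suggest set_meta fans key/value/units/direction out to set_data under namespaced keys, mirroring the composite CLI later on this page; a hedged sketch (the key scheme is an assumption, not confirmed by the library):

    @staticmethod
    def set_meta(key, value, units=None, direction=None):
        # Assumed behaviour: metadata is stored as "meta.<key>.*" data.
        Benchmark.set_data({'meta.%s.value' % key: value})
        if units:
            Benchmark.set_data({'meta.%s.units' % key: units})
        if direction:
            Benchmark.set_data({'meta.%s.direction' % key: direction})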
    def test_benchmark_init(self, warn, init):
        b = Benchmark()
        warn.assert_called_with(
            'The charm-benchmark library has been renamed charms.benchmark. '
            'Please update your code accordingly or report a bug with the '
            'upstream project.',
            DeprecationWarning
        )
        init.assert_called_with(None)

        actions = ['foo', 'bar']
        b = Benchmark(actions)
        init.assert_called_with(actions)
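These test methods appear without their decorators, which were not captured on this page. A hedged sketch of the likely wiring, assuming unittest.mock (the patch targets are illustrative):

import unittest
from unittest import mock

from charmbenchmark import Benchmark

class TestBenchmark(unittest.TestCase):
    # Decorators apply bottom-up, so the innermost patch becomes the
    # first mock argument after self.
    @mock.patch('charmbenchmark.Benchmark.__init__', return_value=None)
    @mock.patch('charmbenchmark.warnings.warn')
    def test_benchmark_init(self, warn, init):
        ...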
Example #5
def main():
    parser = argparse.ArgumentParser(
        description='Set the raw results of a benchmark run.')
    parser.add_argument("value",
                        metavar='value',
                        help='The raw results of a benchmark run.',
                        type=argparse.FileType('r'))

    args = parser.parse_args()

    Benchmark.set_data({'meta.raw': args.value.read()})
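Hedged usage, assuming this main is installed as an executable (the name benchmark-raw is illustrative):

    benchmark-raw ./results.log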
def main():
    parser = argparse.ArgumentParser(
        description='Set a meta key of the benchmark run.')
    parser.add_argument("key",
                        metavar='key',
                        help='The key of the data to store.')

    parser.add_argument("value",
                        metavar='value',
                        help='The value of the data to store.')

    args = parser.parse_args()

    Benchmark.set_meta(args.key, args.value)
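Hedged usage, again with an illustrative executable name:

    benchmark-meta iterations 100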
Example #7
def main():
    parser = argparse.ArgumentParser(
        description='Set the raw results of a benchmark run.'
    )
    parser.add_argument(
        "value",
        metavar='value',
        help='The raw results of a benchmark run.',
        type=argparse.FileType('r')
    )

    args = parser.parse_args()

    Benchmark.set_data({'meta.raw': args.value.read()})
def parse_dbench_output():
    output = '\n'.join(sys.stdin.readlines())
    results = {}
    key_values = re.split(r'[\n\r]+', output)
    for pairs in key_values:
        key, value = pairs.partition("{perf}=")[::2]
        if key:
            results[key] = value

    for key in results:
        Benchmark.set_data({"results.%s" % key: results[key]})

    Benchmark.set_composite_score(results['throughput'], 'throughput')
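The "{perf}=" split implies the dbench output has already been post-processed into one key/value pair per line; an assumed, illustrative input:

    throughput{perf}=403.32
    latency{perf}=12.07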
def parse_sysbench_output():
    output = '\n'.join(sys.stdin.readlines())

    results = {}

    results['time'] = search(output, 'total time')
    results['events'] = search(output, 'total number of events')
    results['event.time'] = search(output, 'total time taken by event execution')
    results['min'] = search(output, 'min')
    results['avg'] = search(output, 'avg')
    results['max'] = search(output, 'max')
    results['95th'] = search(output, 'approx.  95 percentile')
    results['avg.events'] = search(output, 'events (avg/stddev)')
    results['avg.time'] = search(output, 'execution time (avg/stddev)')

    for key in results:
        Benchmark.set_data({"results.%s" % key: results[key]})

    Benchmark.set_composite_score(results['95th'], '95th %', 'asc')
Example #10
def main():
    parser = argparse.ArgumentParser(
        description='Set a meta key of the benchmark run.'
    )
    parser.add_argument(
        "key",
        metavar='key',
        help='The key of the data to store.'
    )

    parser.add_argument(
        "value",
        metavar='value',
        help='The value of the data to store.'
    )

    args = parser.parse_args()

    Benchmark.set_meta(args.key, args.value)
    def test_benchmark_start(self, check_output, exists, action_set):

        exists.return_value = True
        check_output.return_value = "data"
        action_set.return_value = True

        self.assertTrue(Benchmark.start())

        COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
        exists.assert_any_call(COLLECT_PROFILE_DATA)
        check_output.assert_any_call([COLLECT_PROFILE_DATA])
def main():
    parser = argparse.ArgumentParser(
        description='Inform the Benchmark GUI of available benchmarks')
    parser.add_argument(
        "benchmarks",
        metavar='benchmark(s)',
        nargs='+',
        help='A space-delimited list of benchmarks exposed by the charm.')
    args = parser.parse_args()

    Benchmark(args.benchmarks)
Example #13
def main():
    parser = argparse.ArgumentParser(
        description='Set the composite result of a benchmark run.')
    parser.add_argument("key",
                        metavar='key',
                        help='The key of the data to store, i.e., .')

    parser.add_argument("value",
                        metavar='value',
                        help='The value of the data to store, i.e., .')

    parser.add_argument("units",
                        metavar='units',
                        nargs='?',
                        help='''
        The type of units used to measure the composite, i.e., requests/sec.
        ''')
    parser.add_argument("direction",
                        metavar='direction',
                        nargs='?',
                        help='''
        The direction of how the composite should be interpreted. 'asc' if a
        lower number is better; 'desc' if a higher number is better.
        ''')

    args = parser.parse_args()

    Benchmark.set_data({'results.%s.value' % args.key: args.value})
    if args.units:
        Benchmark.set_data({'results.%s.units' % args.key: args.units})
    if args.direction:
        Benchmark.set_data({'results.%s.direction' % args.key: args.direction})
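Hedged usage, with an illustrative executable name:

    benchmark-composite score 9949 transactions/sec desc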
def parse_sysbench_output():
    output = '\n'.join(sys.stdin.readlines())

    results = {}

    m = re.search(r'total time:\s+(\d+\.\d+s)', output)
    results['time'] = m.group(1)

    m = re.search(r'total number of events:\s+(\d+)', output)
    results['events'] = m.group(1)

    m = re.search(r'total time taken by event execution:\s+(\d+\.\d+)', output)
    results['execution'] = m.group(1)

    m = re.search(r'min:\s+(\d+\.\d+ms)', output)
    results['min'] = m.group(1)

    m = re.search(r'avg:\s+(\d+\.\d+ms)', output)
    results['avg'] = m.group(1)

    m = re.search(r'max:\s+(\d+\.\d+ms)', output)
    results['max'] = m.group(1)

    m = re.search(r'approx\.\s+95 percentile:\s+(\d+\.\d+ms)', output)
    results['95th'] = m.group(1)

    # re.M so that '$' anchors at the end of the line, not of the input
    m = re.search(r'events \(avg/stddev\):\s+(.*?)$', output, re.M)
    if m:
        results['events_stddev'] = m.group(1)

    m = re.search(r'execution time \(avg/stddev\):\s+(\d+\.\d+/\d+\.\d+)',
                  output)
    if m:
        results['time_stddev'] = m.group(1)

    for key in results:
        Benchmark.set_data({"results.%s" % key: results[key]})

    Benchmark.set_composite_score(results['95th'], '95th %', 'asc')
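For reference, these regexes target the statistics block sysbench prints at the end of a run. A representative excerpt (0.4/0.5-era format; newer releases changed it):

    total time:                          60.0021s
    total number of events:              10000
    total time taken by event execution: 59.9916
    min:                                  1.91ms
    avg:                                  5.99ms
    max:                                 53.41ms
    approx.  95 percentile:               8.52ms
    events (avg/stddev):           10000.0000/0.00
    execution time (avg/stddev):   59.9916/0.00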
    def test_benchmark_start(self, check_output, exists, action_set,
                             relation_ids, relation_set):

        exists.return_value = True
        check_output.return_value = "data"
        action_set.return_value = True
        relation_ids.return_value = ['benchmark:1']

        self.assertTrue(Benchmark.start())

        relation_set.assert_called_once_with(
            relation_id='benchmark:1',
            relation_settings={'action_id': 'my_action'}
        )

        COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
        exists.assert_any_call(COLLECT_PROFILE_DATA)
        check_output.assert_any_call([COLLECT_PROFILE_DATA])
Example #16
def main():
    parser = argparse.ArgumentParser(
        description='Set the composite result of a benchmark run.'
    )
    parser.add_argument(
        "key",
        metavar='key',
        help='The key of the data to store, i.e., .'
    )

    parser.add_argument(
        "value",
        metavar='value',
        help='The value of the data to store, i.e., .'
    )

    parser.add_argument(
        "units",
        metavar='units',
        nargs='?',
        help='''
        The type of units used to measure the composite, i.e., requests/sec.
        '''
    )
    parser.add_argument(
        "direction",
        metavar='direction',
        nargs='?',
        help='''
        The direction of how the composite should be interpreted. 'asc' if a
        lower number is better; 'desc' if a higher number is better.
        '''
    )

    args = parser.parse_args()

    Benchmark.set_data({'results.%s.value' % args.key: args.value})
    if args.units:
        Benchmark.set_data({'results.%s.units' % args.key: args.units})
    if args.direction:
        Benchmark.set_data({'results.%s.direction' % args.key: args.direction})
    def test_benchmark_set_composite_score(self, set_data):
        set_data.return_value = True
        self.assertTrue(Benchmark.set_composite_score(
            15.7, 'hits/sec', 'desc'))
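Combined with the composite CLI above, set_composite_score presumably stores the value/units/direction triple under fixed keys; a sketch under that assumption (the key names are not confirmed here):

    @staticmethod
    def set_composite_score(value, units='', direction='asc'):
        # Assumed behaviour: the composite score is ordinary namespaced data.
        return Benchmark.set_data({
            'meta.composite.value': value,
            'meta.composite.units': units,
            'meta.composite.direction': direction,
        })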
Example #18
import os
import sys
import json

from charmbenchmark import Benchmark

if len(sys.argv) < 2 or not os.path.exists(sys.argv[1]):
    sys.exit(1)

with open(sys.argv[1]) as f:
    results = json.load(f)

# We only handle 1 scenario ATM

result = results[0]

b = Benchmark()

b.set_data({'results.full-duration.value': result['full_duration']})
b.set_data({'results.full-duration.units': 'seconds'})
b.set_data({'results.full-duration.direction': 'asc'})

b.set_data({'results.load-duration.value': result['load_duration']})
b.set_data({'results.load-duration.units': 'seconds'})
b.set_data({'results.load-duration.direction': 'asc'})

actions = {'average': 0}
total = len(result['result'])

for r in result['result']:
    actions['average'] += r['duration']
    for a, v in r['atomic_actions'].items():
        # Assumed completion: the original snippet is cut off here;
        # accumulating per-action durations matches the running totals above.
        actions.setdefault(a, 0)
        actions[a] += v
Example #19
def main():
    Benchmark().start()
def main():
    Benchmark().finish()
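Hedged usage: these two mains are presumably installed as the executables a benchmark action calls at its boundaries (names illustrative):

    benchmark-start
    # ... run the workload and record results ...
    benchmark-finish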
    def test_benchmark_finish_oserror(self, action_set):
        action_set.side_effect = OSError('File not found')
        self.assertFalse(Benchmark.finish())

    def test_set_data(self, action_set):
        action_set.return_value = True
        data = {'key': 'value'}
        self.assertTrue(Benchmark.set_data(data))
        action_set.assert_called_once_with(data)

    def test_benchmark_finish(self, action_set):
        action_set.return_value = True
        self.assertTrue(Benchmark.finish())