Example #1
    def test_benchmark_meta(self, set_meta, set_data):
        key = 'foo'
        value = 'bar'
        units = 'bogomips'
        direction = 'desc'

        # Test with only a key/value pair
        Benchmark.set_meta(key, value)
        set_meta.assert_called_once_with(key, value)

        set_meta.reset_mock()

        # Test with all parameters
        Benchmark.set_meta(key, value, units, direction)
        set_meta.assert_called_once_with(key, value, units, direction)
Example #2
def parse_output(container_id):
    d = Docker()
    raw_logs = d.logs(container_id)

    run_output = raw_logs.splitlines()

    # The final two lines we care about contain specific strings;
    # search for them to determine whether the run was actually successful.

    # Time taken for tests: 27.048s
    # Time per container: 535.584ms [mean] | 1252.565ms [90th] | 2002.064ms [99th]  # noqa

    parsed = run_output[-1].replace('Time per container: ', '').split('|')

    mean = parsed[0].replace('ms [mean] ', '')
    ninety = parsed[1].replace('ms [90th] ', '')
    ninetynine = parsed[2].replace('ms [99th] ', '')

    total_parsed = run_output[-2].replace('Time taken for tests: ', '')
    total_time = total_parsed.replace('s', '')

    action_set("results.total-time", {'value': total_time, 'units': 's'})

    action_set("results.mean-time", {'value': mean, 'units': 'ms'})

    action_set("results.90th-percentile", {'value': ninety, 'units': 'ms'})

    action_set("results.99th-percentile", {'value': ninetynine, 'units': 'ms'})

    Benchmark.set_composite_score(total_time, 'sec', 'desc')
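The parsing above relies entirely on fixed prefixes and suffixes in the tool's output. The following stand-alone sketch exercises the same string handling against the two sample lines quoted in the comments; it strips whitespace explicitly and needs neither Docker nor Juju, so it is an illustration of the approach rather than a copy of the function.

# Self-contained sketch: parse the sample log lines quoted above.
sample = [
    'Time taken for tests: 27.048s',
    'Time per container: 535.584ms [mean] | 1252.565ms [90th] | 2002.064ms [99th]',
]

parts = sample[-1].replace('Time per container: ', '').split('|')
mean = parts[0].replace('ms [mean] ', '').strip()        # '535.584'
ninety = parts[1].replace('ms [90th] ', '').strip()      # '1252.565'
ninetynine = parts[2].replace('ms [99th]', '').strip()   # '2002.064'
total_time = sample[-2].replace('Time taken for tests: ', '').rstrip('s')  # '27.048'

assert (mean, ninety, ninetynine, total_time) == \
    ('535.584', '1252.565', '2002.064', '27.048')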
Example #3
def main():
    parser = argparse.ArgumentParser(
        description='Set the raw results of a benchmark run.')
    parser.add_argument("value",
                        metavar='value',
                        help='The raw results of a benchmark run.',
                        type=argparse.FileType('r'))

    args = parser.parse_args()

    Benchmark.set_data({'meta.raw': args.value.read()})
Example #4
def main():
    parser = argparse.ArgumentParser(
        description='Set a meta key of the benchmark run.')
    parser.add_argument("key",
                        metavar='key',
                        help='The key of the data to store.')

    parser.add_argument("value",
                        metavar='value',
                        help='The value of the data to store.')

    args = parser.parse_args()

    Benchmark.set_meta(args.key, args.value)
Example #5
    def test_benchmark_init(self, in_relation_hook, relation_ids, relation_set,
                            relation_get):

        in_relation_hook.return_value = True
        relation_data = FAKE_RELATION['benchmark:0']['benchmark/0']
        relation_ids.return_value = FAKE_RELATION.keys()
        relation_get.side_effect = lambda k: relation_data.get(k)
        actions = ['asdf', 'foobar']

        with patch_open() as (_open, _file):
            b = Benchmark(actions)

            self.assertIsInstance(b, Benchmark)

            relation_ids.assert_called_once_with('benchmark')

            for key in b.required_keys:
                relation_get.assert_any_call(key)

            relation_set.assert_called_once_with(
                relation_id='benchmark:0',
                relation_settings={'benchmarks': ",".join(actions)})

            # Test benchmark.conf
            _open.assert_called_with('/etc/benchmark.conf', 'w')
            for key, val in relation_data.items():
                _file.write.assert_any_call("%s=%s\n" % (key, val))
Example #6
def parse_output(container_id):
    d = Docker()
    raw_logs = d.logs(container_id)

    run_output = raw_logs.splitlines()

    # The final two lines we care about contain specific strings;
    # search for them to determine whether the run was actually successful.

    # Time taken for tests: 27.048s
    # Time per container: 535.584ms [mean] | 1252.565ms [90th] | 2002.064ms [99th]  # noqa

    parsed = run_output[-1].replace('Time per container: ', '').split('|')

    mean = parsed[0].replace('ms [mean] ', '')
    ninety = parsed[1].replace('ms [90th] ', '')
    ninetynine = parsed[2].replace('ms [99th] ', '')

    total_parsed = run_output[-2].replace('Time taken for tests: ', '')
    total_time = total_parsed.replace('s', '')

    action_set(
        "results.total-time",
        {'value': total_time, 'units': 's'}
    )

    action_set(
        "results.mean-time",
        {'value': mean, 'units': 'ms'}
    )

    action_set(
        "results.90th-percentile",
        {'value': ninety, 'units': 'ms'}
    )

    action_set(
        "results.99th-percentile",
        {'value': ninetynine, 'units': 'ms'}
    )

    Benchmark.set_composite_score(
        total_time,
        'sec',
        'desc'
    )
Example #7
def main():
    parser = argparse.ArgumentParser(
        description='Set the composite result of a benchmark run.')
    parser.add_argument("key",
                        metavar='key',
                        help='The key of the data to store, i.e., .')

    parser.add_argument("value",
                        metavar='value',
                        help='The key of the data to store, i.e., .')

    parser.add_argument("units",
                        metavar='units',
                        nargs='?',
                        help='''
        The type of units used to measure the composite, e.g., requests/sec.
        ''')
    parser.add_argument("direction",
                        metavar='direction',
                        nargs='?',
                        help='''
        How the composite should be interpreted: 'asc' if a
        lower number is better; 'desc' if a higher number is better.
        ''')

    args = parser.parse_args()

    Benchmark.set_data({'results.%s.value' % args.key: args.value})
    if args.units:
        Benchmark.set_data({'results.%s.units' % args.key: args.units})
    if args.direction:
        Benchmark.set_data({'results.%s.direction' % args.key: args.direction})
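For illustration, with hypothetical arguments key='throughput', value='1024', units='requests/sec' and direction='desc' (values invented for this example, not taken from the source), the three set_data calls above would receive the following dictionaries:

# Hypothetical argument values, for illustration only.
key, value, units, direction = 'throughput', '1024', 'requests/sec', 'desc'

print({'results.%s.value' % key: value})          # {'results.throughput.value': '1024'}
print({'results.%s.units' % key: units})          # {'results.throughput.units': 'requests/sec'}
print({'results.%s.direction' % key: direction})  # {'results.throughput.direction': 'desc'}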
Example #8
def main():
    parser = argparse.ArgumentParser(
        description='Inform the Benchmark GUI of available benchmarks'
    )
    parser.add_argument(
        "benchmarks",
        metavar='benchmark(s)',
        nargs='+',
        help='A space-delimited list of benchmarks exposed by the charm.'
    )
    args = parser.parse_args()

    Benchmark(args.benchmarks)
Example #9
def main():
    parser = argparse.ArgumentParser(
        description='Set the composite result of a benchmark run.')
    parser.add_argument("composite",
                        metavar='composite',
                        help='The composite score of the benchmark run.')
    parser.add_argument("units",
                        metavar='units',
                        help='''
        The type of units used to measure the composite, e.g., requests/sec.
        ''')
    parser.add_argument("direction",
                        metavar='direction',
                        help='''
        How the composite should be interpreted: 'asc' if a
        lower number is better; 'desc' if a higher number is better.
        ''')

    args = parser.parse_args()

    Benchmark().set_composite_score(args.composite, args.units, args.direction)
Example #10
    def test_benchmark_start(self, check_output, exists, action_set,
                             relation_ids, relation_set, in_relation_hook,
                             safe_load):

        exists.return_value = True
        check_output.return_value = "data"
        action_set.return_value = True
        relation_ids.return_value = ['benchmark:1']
        in_relation_hook.return_value = True
        safe_load.side_effect = [METADATA]

        with patch_open() as (_open, _file):
            self.assertTrue(Benchmark.start())
            _open.assert_called_with('/my_dir/metadata.yaml', 'r')

        relation_set.assert_called_once_with(
            relation_id='benchmark:1',
            relation_settings={'action_id': 'my_action'})

        COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
        exists.assert_any_call(COLLECT_PROFILE_DATA)
        check_output.assert_any_call([COLLECT_PROFILE_DATA, 'my_action'])
Example #11
def main():
    parser = argparse.ArgumentParser(
        description='Set the composite result of a benchmark run.'
    )
    parser.add_argument(
        "key",
        metavar='key',
        help='The key of the data to store.'
    )

    parser.add_argument(
        "value",
        metavar='value',
        help='The value of the data to store.'
    )

    parser.add_argument(
        "units",
        metavar='units',
        nargs='?',
        help='''
        The type of units used to measure the composite, e.g., requests/sec.
        '''
    )
    parser.add_argument(
        "direction",
        metavar='direction',
        nargs='?',
        help='''
        How the composite should be interpreted: 'asc' if a
        lower number is better; 'desc' if a higher number is better.
        '''
    )

    args = parser.parse_args()

    Benchmark.set_data({'results.%s.value' % args.key: args.value})
    if args.units:
        Benchmark.set_data({'results.%s.units' % args.key: args.units})
    if args.direction:
        Benchmark.set_data({'results.%s.direction' % args.key: args.direction})
Example #12
    def test_benchmark_start_oserror(self, action_set):
        action_set.side_effect = OSError('File not found')

        with patch_open() as (_open, _file):
            self.assertFalse(Benchmark.start())
Example #13
def main():
    Benchmark().finish()
Example #14
    def test_set_data(self, action_set):
        action_set.return_value = True
        data = {'key': 'value'}
        self.assertTrue(Benchmark.set_data(data))
        action_set.assert_called_once_with(data)
Example #15
    def test_benchmark_set_composite_score(self, set_data):
        set_data.return_value = True
        self.assertTrue(Benchmark.set_composite_score(15.7, 'hits/sec',
                                                      'desc'))
Example #16
    def test_benchmark_finish(self, action_set):
        action_set.return_value = True
        self.assertTrue(Benchmark.finish())
Example #17
    def test_benchmark_finish_oserror(self, action_set):
        action_set.side_effect = OSError('File not found')
        self.assertFalse(Benchmark.finish())
Example #18
def main():
    Benchmark().start()
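Taken together, the scripts in these examples trace a simple lifecycle for a benchmark run. The sketch below strings the same calls together in one place; the import path and the 'load-test' benchmark name are assumptions made for illustration, not taken from the examples themselves.

# Minimal lifecycle sketch; the import path and 'load-test' name are assumed.
from charmhelpers.contrib.benchmark import Benchmark

Benchmark(['load-test'])                                  # advertise available benchmarks
Benchmark().start()                                       # mark the start of the run
Benchmark.set_data({'meta.raw': 'raw benchmark output'})  # store raw results
Benchmark().set_composite_score(27.048, 'sec', 'desc')    # overall score (values from Example #2)
Benchmark().finish()                                      # mark the run as complete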