def test_benchmark_init(self, in_relation_hook, relation_ids, relation_set, relation_get):
    """Constructing a Benchmark inside a relation hook should advertise the
    action list on the benchmark relation and write /etc/benchmark.conf.
    """
    # Pretend we are executing inside a hook so Benchmark talks to the relation.
    in_relation_hook.return_value = True
    fake_settings = FAKE_RELATION['benchmark:0']['benchmark/0']
    relation_ids.return_value = FAKE_RELATION.keys()
    relation_get.side_effect = lambda k: fake_settings.get(k)

    advertised = ['asdf', 'foobar']
    with patch_open() as (_open, _file):
        bench = Benchmark(advertised)

        self.assertIsInstance(bench, Benchmark)

        # The benchmark relation was looked up exactly once ...
        relation_ids.assert_called_once_with('benchmark')
        # ... and every required key was fetched from it.
        for required in bench.required_keys:
            relation_get.assert_any_call(required)

        # The advertised actions were published as a comma-separated list.
        relation_set.assert_called_once_with(
            relation_id='benchmark:0',
            relation_settings={'benchmarks': ",".join(advertised)})

        # Test benchmark.conf: every relation setting is written as key=value.
        _open.assert_called_with('/etc/benchmark.conf', 'w')
        for key, val in fake_settings.items():
            _file.write.assert_any_call("%s=%s\n" % (key, val))
def main():
    """Entry point: advertise the charm's benchmarks to the Benchmark GUI.

    Accepts one or more benchmark names on the command line and hands them
    to Benchmark(), which publishes them over the benchmark relation.
    """
    arg_parser = argparse.ArgumentParser(
        description='Inform the Benchmark GUI of available benchmarks'
    )
    # nargs='+' requires at least one benchmark name.
    arg_parser.add_argument(
        "benchmarks",
        metavar='benchmark(s)',
        nargs='+',
        help='A space-delimited list of benchmarks exposed by the charm.'
    )
    parsed = arg_parser.parse_args()
    Benchmark(parsed.benchmarks)
def main():
    """Entry point: record the composite result of a benchmark run.

    Reads the composite score, its units, and the comparison direction from
    the command line and forwards them to Benchmark.set_composite_score().
    """
    arg_parser = argparse.ArgumentParser(
        description='Set the composite result of a benchmark run.')
    arg_parser.add_argument(
        "composite", metavar='composite',
        help='The composite score of the benchmark run.')
    arg_parser.add_argument(
        "units", metavar='units',
        help='''
        The type of units used to measure the composite, i.e., requests/sec.
        ''')
    arg_parser.add_argument(
        "direction", metavar='direction',
        help='''
        The direction of how the composite should be interpreted. 'asc' if a lower number is better; 'desc' if a higher number is better.
        ''')
    parsed = arg_parser.parse_args()
    Benchmark().set_composite_score(
        parsed.composite, parsed.units, parsed.direction)
def main():
    """Entry point: mark the beginning of a benchmark run."""
    bench = Benchmark()
    bench.start()
def main():
    """Entry point: mark the end of a benchmark run."""
    bench = Benchmark()
    bench.finish()