def run_benchmarks(self, test_plan, env):
    """Run each benchmark action in the test plan and return the
    collected Benchmark models."""
    actions_client = get_versioned_juju_api().facades.Actions(env)
    env_status = env.status()
    benchmarks = []
    for benchmark_plan in test_plan.benchmarks:
        logging.info('Running benchmark {} on {} with params: {}'.format(
            benchmark_plan.action, benchmark_plan.unit,
            benchmark_plan.params))
        # Resolve the plan's unit name against the live environment status.
        real_unit = find_unit(benchmark_plan.unit, env_status)
        if not real_unit:
            logging.error('Unit not found: {}'.format(benchmark_plan.unit))
            continue
        try:
            result = run_action(
                actions_client, real_unit, benchmark_plan.action,
                action_param=benchmark_plan.params, timeout=3600)
        except Exception as e:
            logging.error('Action run failed: {}'.format(str(e)))
            continue
        composite = result.get('meta', {}).get('composite')
        if not composite:
            logging.error('Skipping benchmark missing composite key: '
                          '{}'.format(benchmark_plan.action))
            continue
        # Tag the composite result so it can be traced back to this run.
        composite.update({
            'name': benchmark_plan.action,
            'test_id': self.test_id,
            'provider': env.provider_name,
        })
        benchmarks.append(model.Benchmark.from_action(composite))
        logging.info('Benchmark completed.')
    return benchmarks
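# Illustrative shape of a run_action() result consumed by run_benchmarks,
# inferred from the .get('meta', {}).get('composite') lookup above. The
# inner field names ('value', 'units', 'direction') follow the usual charm
# benchmark convention and are an assumption, not taken from this code.
example_result = {
    'meta': {
        'composite': {
            'value': '200.5',
            'units': 'ops/sec',
            'direction': 'desc',
        },
    },
}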
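# Hypothetical test double for the Actions facade used by run_action in the
# test below. This is a minimal sketch, assuming run_action enqueues the
# action via enqueue_units() and then polls info() until the action reports
# 'completed'; the method names, payload shapes, and the canned 'list-users'
# output are illustrative assumptions, not the project's actual fake.
class FakeActionClient:

    def enqueue_units(self, unit, action, action_param=None):
        # Pretend the action was queued and hand back a fake action tag.
        return {'results': [{'action': {'tag': 'action-1'}, 'error': None}]}

    def info(self, tags):
        # Report the action as finished with output matching the test below.
        return {'results': [{
            'status': 'completed',
            'error': None,
            'output': {'users': 'user, someuser'},
        }]}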
def test_run_action(self):
    fake_client = FakeActionClient()
    results = run_action(fake_client, 'git/0', 'list-users', None)
    expected = {'users': 'user, someuser'}
    self.assertEqual(results, expected)
    return results