def run_scenario(self, scenario, auth_kwargs, run_results, noop=False, with_profiling=False, keep_objects=False, batch_size=1):
    """
    Runs a CRUD scenario, given cluster parameters and a Scenario object.

    :param scenario: Scenario object describing the benchmark run
    :param auth_kwargs: All-you-can-eat dictionary of authentication-related
                        arguments.
    :param run_results: RunResults objects for the run
    :param noop: Run in no-op mode?
    :param with_profiling: Profile the run?
    :param keep_objects: Keep uploaded objects instead of deleting them?
    :param batch_size: Send this many bench jobs per packet to workers
    :returns: Collected result records from workers
    """
    run_state = RunState()
    logging.info(u'Starting scenario run for "%s"', scenario.name)
    # Bench runs open many concurrent connections; bump the process's
    # file-descriptor limit up front so the run doesn't die mid-way.
    raise_file_descriptor_limit()
    # Construct auth_kwargs appropriate for client.get_auth()
    # If a pre-made token was supplied, strip auth_kwargs down to just the
    # storage URLs and token (no credential-based auth needed).
    if auth_kwargs.get('token'):
        auth_kwargs = {
            'storage_urls': auth_kwargs['storage_urls'],
            'token': auth_kwargs['token'],
        }
    # Ensure containers exist
    # (skipped entirely in no-op mode, which never talks to the cluster)
    if not noop:
        storage_urls, c_token = self._authenticate(auth_kwargs)
        logging.info(
            'Ensuring %d containers (%s_*) exist; '
            'concurrency=%d...',
            len(scenario.containers), scenario.container_base,
            scenario.container_concurrency)
        # Fan out container creation across a bounded gevent pool.
        pool = gevent.pool.Pool(scenario.container_concurrency)
        for container in scenario.containers:
            pool.spawn(_container_creator, storage_urls, c_token,
                       container, policy=scenario.policy)
        pool.join()
    # Enqueue initialization jobs
    # Phase 1: seed the cluster with the scenario's initial object
    # population before the timed benchmark phase.
    if not noop:
        logging.info(
            'Initializing cluster with stock data (up to %d '
            'concurrent workers)', scenario.user_count)
        self.do_a_run(scenario.user_count, scenario.initial_jobs(),
                      run_state.handle_initialization_result, auth_kwargs,
                      batch_size=batch_size)
    # Phase 2: the actual timed benchmark run.
    logging.info('Starting benchmark run (up to %d concurrent '
                 'workers)', scenario.user_count)
    if noop:
        logging.info(' (not actually talking to Swift cluster!)')
    if with_profiling:
        # Local import keeps cProfile overhead out of normal runs.
        import cProfile
        prof = cProfile.Profile()
        prof.enable()
    self.do_a_run(scenario.user_count, scenario.bench_jobs(),
                  run_state.handle_run_result, auth_kwargs,
                  mapper_fn=run_state.fill_in_job,
                  label='Benchmark Run:',
                  noop=noop, batch_size=batch_size,
                  run_results=run_results)
    if with_profiling:
        prof.disable()
        # One profile file per process so concurrent runs don't clobber
        # each other's stats.
        prof_output_path = '/tmp/do_a_run.%d.prof' % os.getpid()
        prof.dump_stats(prof_output_path)
        logging.info('PROFILED main do_a_run to %s', prof_output_path)
    # Phase 3 (optional): clean up the objects created during the run.
    # No-op runs created nothing, and -k/--keep-objects skips deletion.
    if not noop and not keep_objects:
        logging.info('Deleting population objects from cluster')
        self.do_a_run(scenario.user_count,
                      run_state.cleanup_object_infos(),
                      lambda *_: None,
                      auth_kwargs, mapper_fn=_gen_cleanup_job,
                      batch_size=batch_size)
    elif keep_objects:
        logging.info('NOT deleting any objects due to -k/--keep-objects')
def setUp(self):
    # Give each test its own fresh RunState so no accumulated
    # result/object state leaks between test cases.
    self.run_state = RunState()