Example #1
0
    def __init__(self,
                 zmq_host,
                 zmq_work_port,
                 zmq_results_port,
                 worker_id,
                 max_retries,
                 profile_count=0,
                 concurrency=256,
                 batch_size=1):
        """
        Set up this worker and connect it to the master's ZMQ endpoints.

        :param zmq_host: Hostname or IP address of the ZMQ master
        :param zmq_work_port: Port of the master's work (PUSH) socket;
                              this worker PULLs jobs from it
        :param zmq_results_port: Port of the master's results (PULL) socket;
                                 this worker PUSHes results to it
        :param worker_id: Identifier for this worker
        :param max_retries: Maximum number of retries for a failed job
        :param profile_count: Profile this many jobs (0 disables profiling)
        :param concurrency: Maximum number of concurrent greenlets
        :param batch_size: Number of bench jobs sent per packet
        """
        work_endpoint = 'tcp://%s:%d' % (zmq_host, zmq_work_port)
        results_endpoint = 'tcp://%s:%d' % (zmq_host, zmq_results_port)
        ipv6 = is_ipv6(zmq_host)
        self.worker_id = worker_id
        self.max_retries = max_retries
        self.profile_count = profile_count
        self.batch_size = batch_size

        # High concurrency means many open sockets; bump RLIMIT_NOFILE first.
        raise_file_descriptor_limit()

        self.concurrency = concurrency
        # gevent.coros was deprecated in gevent 1.0 and removed in 1.5;
        # gevent.lock.Semaphore is the documented replacement.  Imported
        # locally so the rest of the module's imports are untouched.
        from gevent.lock import Semaphore
        self.conn_pools_lock = Semaphore(1)
        self.conn_pools = {}  # hashed by storage_url
        self.token_data = {}
        self.token_data_lock = Semaphore(1)

        self.context = zmq.Context()
        self.work_pull = self.context.socket(zmq.PULL)
        self.work_pull.ipv6 = ipv6  # enable IPv6 transport when host needs it
        self.work_pull.connect(work_endpoint)
        self.results_push = self.context.socket(zmq.PUSH)
        self.results_push.ipv6 = ipv6
        self.results_push.connect(results_endpoint)

        self.result_queue = gevent.queue.Queue()
Example #2
0
    def __init__(self, zmq_host, zmq_work_port, zmq_results_port, worker_id,
                 max_retries, profile_count=0, concurrency=256, batch_size=1):
        """
        Set up this worker and connect it to the master's ZMQ endpoints.

        :param zmq_host: Hostname or IP address of the ZMQ master
        :param zmq_work_port: Port this worker PULLs jobs from
        :param zmq_results_port: Port this worker PUSHes results to
        :param worker_id: Identifier for this worker
        :param max_retries: Maximum number of retries for a failed job
        :param profile_count: Profile this many jobs (0 disables profiling)
        :param concurrency: Maximum number of concurrent greenlets
        :param batch_size: Number of bench jobs sent per packet
        """
        work_endpoint = 'tcp://%s:%d' % (zmq_host, zmq_work_port)
        results_endpoint = 'tcp://%s:%d' % (zmq_host, zmq_results_port)
        ipv6 = is_ipv6(zmq_host)
        self.worker_id = worker_id
        self.max_retries = max_retries
        self.profile_count = profile_count
        self.batch_size = batch_size

        # High concurrency means many open sockets; bump RLIMIT_NOFILE first.
        raise_file_descriptor_limit()

        self.concurrency = concurrency
        # NOTE(review): gevent.coros was deprecated in gevent 1.0 and removed
        # in 1.5; gevent.lock.Semaphore is the modern replacement.
        self.conn_pools_lock = gevent.coros.Semaphore(1)
        self.conn_pools = {}  # hashed by storage_url
        self.token_data = {}
        self.token_data_lock = gevent.coros.Semaphore(1)

        self.context = zmq.Context()
        self.work_pull = self.context.socket(zmq.PULL)
        self.work_pull.ipv6 = ipv6  # enable IPv6 transport when host needs it
        self.work_pull.connect(work_endpoint)
        self.results_push = self.context.socket(zmq.PUSH)
        self.results_push.ipv6 = ipv6
        self.results_push.connect(results_endpoint)

        self.result_queue = gevent.queue.Queue()
Example #3
0
    def run_scenario(self, scenario, auth_kwargs, run_results, noop=False,
                     with_profiling=False, keep_objects=False, batch_size=1):
        """
        Runs a CRUD scenario, given cluster parameters and a Scenario object.

        :param scenario: Scenario object describing the benchmark run
        :param auth_kwargs: All-you-can-eat dictionary of
                            authentication-related arguments.
        :param run_results: RunResults objects for the run
        :param noop: Run in no-op mode?
        :param with_profiling: Profile the run?
        :param keep_objects: Keep uploaded objects instead of deleting them?
        :param batch_size: Send this many bench jobs per packet to workers
        :returns: None; collected result records go into *run_results*
        """

        run_state = RunState()

        logging.info(u'Starting scenario run for "%s"', scenario.name)

        # The run can open many concurrent connections; bump RLIMIT_NOFILE.
        raise_file_descriptor_limit()

        # Construct auth_kwargs appropriate for client.get_auth(): when a
        # pre-acquired token is supplied, only the token and its storage
        # URLs are passed along.
        if auth_kwargs.get('token'):
            auth_kwargs = {
                'storage_urls': auth_kwargs['storage_urls'],
                'token': auth_kwargs['token'],
            }

        # Ensure containers exist (skipped entirely in no-op mode)
        if not noop:
            storage_urls, c_token = self._authenticate(auth_kwargs)

            logging.info('Ensuring %d containers (%s_*) exist; '
                         'concurrency=%d...',
                         len(scenario.containers), scenario.container_base,
                         scenario.container_concurrency)
            pool = gevent.pool.Pool(scenario.container_concurrency)
            for container in scenario.containers:
                pool.spawn(_container_creator, storage_urls, c_token,
                           container, policy=scenario.policy)
            pool.join()

        # Enqueue initialization jobs to seed the cluster with stock data
        if not noop:
            logging.info('Initializing cluster with stock data (up to %d '
                         'concurrent workers)', scenario.user_count)

            self.do_a_run(scenario.user_count, scenario.initial_jobs(),
                          run_state.handle_initialization_result, auth_kwargs,
                          batch_size=batch_size)

        logging.info('Starting benchmark run (up to %d concurrent '
                     'workers)', scenario.user_count)
        if noop:
            logging.info('  (not actually talking to Swift cluster!)')

        # Wrap the main run in a cProfile session when requested; the
        # import is deferred so non-profiled runs pay nothing.
        if with_profiling:
            import cProfile
            prof = cProfile.Profile()
            prof.enable()
        self.do_a_run(scenario.user_count, scenario.bench_jobs(),
                      run_state.handle_run_result, auth_kwargs,
                      mapper_fn=run_state.fill_in_job,
                      label='Benchmark Run:', noop=noop, batch_size=batch_size,
                      run_results=run_results)
        if with_profiling:
            prof.disable()
            prof_output_path = '/tmp/do_a_run.%d.prof' % os.getpid()
            prof.dump_stats(prof_output_path)
            logging.info('PROFILED main do_a_run to %s', prof_output_path)

        # Clean up the objects created above unless the user asked to keep
        # them (or nothing was created because this was a no-op run).
        if not noop and not keep_objects:
            logging.info('Deleting population objects from cluster')
            self.do_a_run(scenario.user_count,
                          run_state.cleanup_object_infos(),
                          lambda *_: None,
                          auth_kwargs, mapper_fn=_gen_cleanup_job,
                          batch_size=batch_size)
        elif keep_objects:
            logging.info('NOT deleting any objects due to -k/--keep-objects')
Example #4
0
    def run_scenario(self,
                     scenario,
                     auth_kwargs,
                     run_results,
                     noop=False,
                     with_profiling=False,
                     keep_objects=False,
                     batch_size=1):
        """
        Runs a CRUD scenario, given cluster parameters and a Scenario object.

        :param scenario: Scenario object describing the benchmark run
        :param auth_kwargs: All-you-can-eat dictionary of
                            authentication-related arguments.
        :param run_results: RunResults objects for the run
        :param noop: Run in no-op mode?
        :param with_profiling: Profile the run?
        :param keep_objects: Keep uploaded objects instead of deleting them?
        :param batch_size: Send this many bench jobs per packet to workers
        :returns: None; collected result records go into *run_results*
        """
        state = RunState()

        logging.info(u'Starting scenario run for "%s"', scenario.name)

        # The run can open many concurrent connections; bump RLIMIT_NOFILE.
        raise_file_descriptor_limit()

        # When a pre-acquired token is supplied, reduce auth_kwargs to just
        # the keys client.get_auth() needs.
        if auth_kwargs.get('token'):
            auth_kwargs = {
                'storage_urls': auth_kwargs['storage_urls'],
                'token': auth_kwargs['token'],
            }

        if not noop:
            # First make sure every container the scenario references exists.
            storage_urls, c_token = self._authenticate(auth_kwargs)

            logging.info(
                'Ensuring %d containers (%s_*) exist; '
                'concurrency=%d...', len(scenario.containers),
                scenario.container_base, scenario.container_concurrency)
            creator_pool = gevent.pool.Pool(scenario.container_concurrency)
            for container_name in scenario.containers:
                creator_pool.spawn(_container_creator,
                                   storage_urls,
                                   c_token,
                                   container_name,
                                   policy=scenario.policy)
            creator_pool.join()

            # Then seed the cluster with the scenario's stock data.
            logging.info(
                'Initializing cluster with stock data (up to %d '
                'concurrent workers)', scenario.user_count)

            self.do_a_run(scenario.user_count,
                          scenario.initial_jobs(),
                          state.handle_initialization_result,
                          auth_kwargs,
                          batch_size=batch_size)

        logging.info('Starting benchmark run (up to %d concurrent '
                     'workers)', scenario.user_count)
        if noop:
            logging.info('  (not actually talking to Swift cluster!)')

        # Wrap the main run in a cProfile session when requested; the
        # import is deferred so non-profiled runs pay nothing.
        profiler = None
        if with_profiling:
            import cProfile
            profiler = cProfile.Profile()
            profiler.enable()
        self.do_a_run(scenario.user_count,
                      scenario.bench_jobs(),
                      state.handle_run_result,
                      auth_kwargs,
                      mapper_fn=state.fill_in_job,
                      label='Benchmark Run:',
                      noop=noop,
                      batch_size=batch_size,
                      run_results=run_results)
        if profiler is not None:
            profiler.disable()
            dump_path = '/tmp/do_a_run.%d.prof' % os.getpid()
            profiler.dump_stats(dump_path)
            logging.info('PROFILED main do_a_run to %s', dump_path)

        # Clean up what the run created, unless the user asked to keep the
        # objects (or a no-op run created nothing to delete).
        if keep_objects:
            logging.info('NOT deleting any objects due to -k/--keep-objects')
        elif not noop:
            logging.info('Deleting population objects from cluster')
            self.do_a_run(scenario.user_count,
                          state.cleanup_object_infos(),
                          lambda *_: None,
                          auth_kwargs,
                          mapper_fn=_gen_cleanup_job,
                          batch_size=batch_size)