Example 1
    def run_scenario(self, scenario, auth_kwargs, run_results, noop=False,
                     with_profiling=False, keep_objects=False, batch_size=1):
        """
        Runs a CRUD scenario, given cluster parameters and a Scenario object.

        :param scenario: Scenario object describing the benchmark run
        :param auth_kwargs: All-you-can-eat dictionary of
                            authentication-related arguments.
        :param run_results: RunResults objects for the run
        :param noop: Run in no-op mode?
        :param with_profiling: Profile the run?
        :param keep_objects: Keep uploaded objects instead of deleting them?
        :param batch_size: Send this many bench jobs per packet to workers
        :returns: None; worker result records are collected via
                  ``run_results``
        """

        # Tracks per-run object bookkeeping (which objects exist per size
        # class) across the initialization and benchmark phases.
        run_state = RunState()

        logging.info(u'Starting scenario run for "%s"', scenario.name)

        # Raise RLIMIT_NOFILE so many concurrent worker sockets don't hit
        # the default per-process file descriptor cap.
        raise_file_descriptor_limit()

        # Construct auth_kwargs appropriate for client.get_auth()
        # If a pre-obtained token was supplied, strip auth_kwargs down to
        # just storage URLs + token (credentials are not needed).
        if auth_kwargs.get('token'):
            auth_kwargs = {
                'storage_urls': auth_kwargs['storage_urls'],
                'token': auth_kwargs['token'],
            }

        # Ensure containers exist
        if not noop:
            storage_urls, c_token = self._authenticate(auth_kwargs)

            logging.info('Ensuring %d containers (%s_*) exist; '
                         'concurrency=%d...',
                         len(scenario.containers), scenario.container_base,
                         scenario.container_concurrency)
            # Create containers concurrently, bounded by the scenario's
            # container_concurrency setting.
            pool = gevent.pool.Pool(scenario.container_concurrency)
            for container in scenario.containers:
                pool.spawn(_container_creator, storage_urls, c_token,
                           container, policy=scenario.policy)
            pool.join()

        # Enqueue initialization jobs
        if not noop:
            logging.info('Initializing cluster with stock data (up to %d '
                         'concurrent workers)', scenario.user_count)

            # Pre-populate the cluster; results feed run_state so benchmark
            # jobs can later be filled in with existing object names.
            self.do_a_run(scenario.user_count, scenario.initial_jobs(),
                          run_state.handle_initialization_result, auth_kwargs,
                          batch_size=batch_size)

        logging.info('Starting benchmark run (up to %d concurrent '
                     'workers)', scenario.user_count)
        if noop:
            logging.info('  (not actually talking to Swift cluster!)')

        if with_profiling:
            # Imported lazily so profiling costs nothing when disabled.
            import cProfile
            prof = cProfile.Profile()
            prof.enable()
        self.do_a_run(scenario.user_count, scenario.bench_jobs(),
                      run_state.handle_run_result, auth_kwargs,
                      mapper_fn=run_state.fill_in_job,
                      label='Benchmark Run:', noop=noop, batch_size=batch_size,
                      run_results=run_results)
        if with_profiling:
            prof.disable()
            prof_output_path = '/tmp/do_a_run.%d.prof' % os.getpid()
            prof.dump_stats(prof_output_path)
            logging.info('PROFILED main do_a_run to %s', prof_output_path)

        if not noop and not keep_objects:
            logging.info('Deleting population objects from cluster')
            # Best-effort cleanup: per-result callback discards everything.
            self.do_a_run(scenario.user_count,
                          run_state.cleanup_object_infos(),
                          lambda *_: None,
                          auth_kwargs, mapper_fn=_gen_cleanup_job,
                          batch_size=batch_size)
        elif keep_objects:
            logging.info('NOT deleting any objects due to -k/--keep-objects')
Example 2
    def run_scenario(self, scenario, auth_url, user, key, auth_version,
                     os_options, cacert, insecure, storage_url, token,
                     noop=False, with_profiling=False, keep_objects=False,
                     batch_size=1):
        """
        Runs a CRUD scenario, given cluster parameters and a Scenario object.

        :param scenario: Scenario object describing the benchmark run
        :param auth_url: Authentication URL for the Swift cluster
        :param user: Account/Username to use (format is <account>:<username>)
        :param key: Password for the Account/Username
        :param auth_version: OpenStack auth version, default is 1.0
        :param os_options: The OpenStack options which can have tenant_id,
                           auth_token, service_type, endpoint_type,
                           tenant_name, object_storage_url, region_name
        :param insecure: Allow to access insecure keystone server.
                         The keystone's certificate will not be verified.
        :param cacert: Bundle file to use in verifying SSL.
        :param storage_url: Optional user-specified x-storage-url
        :param token: Optional user-specified x-auth-token
        :param noop: Run in no-op mode?
        :param with_profiling: Profile the run?
        :param keep_objects: Keep uploaded objects instead of deleting them?
        :param batch_size: Send this many bench jobs per packet to workers
        :returns: Collected result records from workers
        """

        # Tracks per-run object bookkeeping (which objects exist per size
        # class) across the initialization and benchmark phases.
        run_state = RunState()

        logging.info(u'Starting scenario run for "%s"', scenario.name)

        # Raise RLIMIT_NOFILE so many concurrent worker sockets don't hit
        # the default per-process file descriptor cap.
        soft_nofile, hard_nofile = resource.getrlimit(resource.RLIMIT_NOFILE)
        nofile_target = 1024
        if os.geteuid() == 0:
            # Root may raise the hard limit too, so scale with user_count
            # (plus slack for log files, stdio, etc.).
            nofile_target = max(nofile_target, scenario.user_count + 20)
            hard_nofile = nofile_target
        elif hard_nofile != resource.RLIM_INFINITY:
            # BUGFIX: an unprivileged process may not raise its soft limit
            # above the hard limit; clamp so setrlimit() doesn't raise
            # ValueError on systems whose hard limit is below 1024.
            nofile_target = min(nofile_target, hard_nofile)
        resource.setrlimit(resource.RLIMIT_NOFILE, (nofile_target,
                                                    hard_nofile))

        # Construct auth_kwargs appropriate for client.get_auth()
        if not token:
            auth_kwargs = dict(
                auth_url=auth_url, user=user, key=key,
                auth_version=auth_version, os_options=os_options,
                cacert=cacert, insecure=insecure, storage_url=storage_url)
        else:
            # A pre-obtained token bypasses authentication entirely.
            auth_kwargs = dict(storage_url=storage_url, token=token)

        # Ensure containers exist
        if not noop:
            if not token:
                # NOTE(review): this logs the account password at DEBUG
                # level; consider redacting `key` in shared environments.
                logging.debug('Authenticating to %s with %s/%s', auth_url,
                              user, key)
                c_storage_url, c_token = client.get_auth(**auth_kwargs)
                if storage_url:
                    # A user-specified storage URL always wins over the one
                    # returned by auth.
                    logging.debug('Overriding auth storage url %s with %s',
                                  c_storage_url, storage_url)
                    c_storage_url = storage_url
            else:
                c_storage_url, c_token = storage_url, token
                logging.debug('Using token %s at %s', c_token, c_storage_url)

            logging.info('Ensuring %d containers (%s_*) exist; '
                         'concurrency=%d...',
                         len(scenario.containers), scenario.container_base,
                         scenario.container_concurrency)
            # Create containers concurrently, bounded by the scenario's
            # container_concurrency setting.
            pool = gevent.pool.Pool(scenario.container_concurrency)
            for container in scenario.containers:
                pool.spawn(_container_creator, c_storage_url, c_token,
                           container)
            pool.join()

        # Enqueue initialization jobs
        if not noop:
            logging.info('Initializing cluster with stock data (up to %d '
                         'concurrent workers)', scenario.user_count)

            # Pre-populate the cluster; results feed run_state so benchmark
            # jobs can later be filled in with existing object names.
            self.do_a_run(scenario.user_count, scenario.initial_jobs(),
                          run_state.handle_initialization_result, auth_kwargs,
                          batch_size=batch_size)

        logging.info('Starting benchmark run (up to %d concurrent '
                     'workers)', scenario.user_count)
        if noop:
            logging.info('  (not actually talking to Swift cluster!)')

        if with_profiling:
            # Imported lazily so profiling costs nothing when disabled.
            import cProfile
            prof = cProfile.Profile()
            prof.enable()
        self.do_a_run(scenario.user_count, scenario.bench_jobs(),
                      run_state.handle_run_result, auth_kwargs,
                      mapper_fn=run_state.fill_in_job,
                      label='Benchmark Run:', noop=noop, batch_size=batch_size)
        if with_profiling:
            prof.disable()
            prof_output_path = '/tmp/do_a_run.%d.prof' % os.getpid()
            prof.dump_stats(prof_output_path)
            logging.info('PROFILED main do_a_run to %s', prof_output_path)

        if not noop and not keep_objects:
            logging.info('Deleting population objects from cluster')
            # Best-effort cleanup: per-result callback discards everything.
            self.do_a_run(scenario.user_count,
                          run_state.cleanup_object_infos(),
                          lambda *_: None,
                          auth_kwargs, mapper_fn=_gen_cleanup_job,
                          batch_size=batch_size)
        elif keep_objects:
            logging.info('NOT deleting any objects due to -k/--keep-objects')

        return run_state.run_results
Example 3
 def setUp(self):
     # Create a fresh RunState for each test so no bookkeeping state leaks
     # between test cases.
     self.run_state = RunState()
Example 4
    def run_scenario(self,
                     scenario,
                     auth_kwargs,
                     run_results,
                     noop=False,
                     with_profiling=False,
                     keep_objects=False,
                     batch_size=1):
        """
        Runs a CRUD scenario, given cluster parameters and a Scenario object.

        :param scenario: Scenario object describing the benchmark run
        :param auth_kwargs: All-you-can-eat dictionary of
                            authentication-related arguments.
        :param run_results: RunResults objects for the run
        :param noop: Run in no-op mode?
        :param with_profiling: Profile the run?
        :param keep_objects: Keep uploaded objects instead of deleting them?
        :param batch_size: Send this many bench jobs per packet to workers
        :returns: None; worker result records are collected via
                  ``run_results``
        """

        # Tracks per-run object bookkeeping (which objects exist per size
        # class) across the initialization and benchmark phases.
        run_state = RunState()

        logging.info(u'Starting scenario run for "%s"', scenario.name)

        # Raise RLIMIT_NOFILE so many concurrent worker sockets don't hit
        # the default per-process file descriptor cap.
        raise_file_descriptor_limit()

        # Construct auth_kwargs appropriate for client.get_auth()
        # If a pre-obtained token was supplied, strip auth_kwargs down to
        # just storage URLs + token (credentials are not needed).
        if auth_kwargs.get('token'):
            auth_kwargs = {
                'storage_urls': auth_kwargs['storage_urls'],
                'token': auth_kwargs['token'],
            }

        # Ensure containers exist
        if not noop:
            storage_urls, c_token = self._authenticate(auth_kwargs)

            logging.info(
                'Ensuring %d containers (%s_*) exist; '
                'concurrency=%d...', len(scenario.containers),
                scenario.container_base, scenario.container_concurrency)
            # Create containers concurrently, bounded by the scenario's
            # container_concurrency setting.
            pool = gevent.pool.Pool(scenario.container_concurrency)
            for container in scenario.containers:
                pool.spawn(_container_creator,
                           storage_urls,
                           c_token,
                           container,
                           policy=scenario.policy)
            pool.join()

        # Enqueue initialization jobs
        if not noop:
            logging.info(
                'Initializing cluster with stock data (up to %d '
                'concurrent workers)', scenario.user_count)

            # Pre-populate the cluster; results feed run_state so benchmark
            # jobs can later be filled in with existing object names.
            self.do_a_run(scenario.user_count,
                          scenario.initial_jobs(),
                          run_state.handle_initialization_result,
                          auth_kwargs,
                          batch_size=batch_size)

        logging.info('Starting benchmark run (up to %d concurrent '
                     'workers)', scenario.user_count)
        if noop:
            logging.info('  (not actually talking to Swift cluster!)')

        if with_profiling:
            # Imported lazily so profiling costs nothing when disabled.
            import cProfile
            prof = cProfile.Profile()
            prof.enable()
        self.do_a_run(scenario.user_count,
                      scenario.bench_jobs(),
                      run_state.handle_run_result,
                      auth_kwargs,
                      mapper_fn=run_state.fill_in_job,
                      label='Benchmark Run:',
                      noop=noop,
                      batch_size=batch_size,
                      run_results=run_results)
        if with_profiling:
            prof.disable()
            prof_output_path = '/tmp/do_a_run.%d.prof' % os.getpid()
            prof.dump_stats(prof_output_path)
            logging.info('PROFILED main do_a_run to %s', prof_output_path)

        if not noop and not keep_objects:
            logging.info('Deleting population objects from cluster')
            # Best-effort cleanup: per-result callback discards everything.
            self.do_a_run(scenario.user_count,
                          run_state.cleanup_object_infos(),
                          lambda *_: None,
                          auth_kwargs,
                          mapper_fn=_gen_cleanup_job,
                          batch_size=batch_size)
        elif keep_objects:
            logging.info('NOT deleting any objects due to -k/--keep-objects')
Example 5
class TestRunState(object):
    """Tests for RunState: tracking which objects exist per size class and
    handing their names back out to fill in later benchmark jobs."""

    def setUp(self):
        # Fresh RunState per test so no bookkeeping state leaks between
        # test cases (nose-style setUp).
        self.run_state = RunState()

    def _fill_initial_results(self):
        """Feed a canned set of initialization-phase results into run_state."""
        initial_results = [{
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'obtuse',
            'container': 'bucket0',
            'name': 'obj1',
            'size': 88,
        }]
        # non-CREATE_OBJECT results are saved but not added to the deque
        for t in [
                ssbench.READ_OBJECT, ssbench.UPDATE_OBJECT,
                ssbench.DELETE_OBJECT
        ]:
            initial_results.append({
                'type': t,
                'size_str': 'obtuse',
                'container': 'bucket0',
                'name': 'obj1',
            })
        initial_results.append({
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'obtuse',
            'container': 'bucket1',
            'name': 'obj1',
            'size': 89,
        })
        # exception results are saved but not added to the deque
        initial_results.append({
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'obtuse',
            'container': 'bucket0',
            'name': 'obj1',
            'exception': 'oh noes!',
        })
        initial_results.append({
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'round',
            'container': 'bucket0',
            'name': 'obj2',
            'size': 90,
        })
        for r in initial_results:
            self.run_state.handle_initialization_result(r)

    def _fill_run_results(self):
        """Feed a canned set of benchmark-run results into run_state."""
        run_results = [{
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'round',
            'container': 'bucket0',
            'name': 'obj3',
            'size': 77,
        }]
        # non-CREATE_OBJECT results are saved but not added to the deque
        for t in [
                ssbench.READ_OBJECT, ssbench.UPDATE_OBJECT,
                ssbench.DELETE_OBJECT
        ]:
            run_results.append({
                'type': t,
                'size_str': 'obtuse',
                'container': 'bucket0',
                'name': 'obj8',
            })
        run_results.append({
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'obtuse',
            'container': 'bucket3',
            'name': 'obj4',
            'size': 89,
        })
        # exception results are saved but not added to the deque
        run_results.append({
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'obtuse',
            'container': 'bucket3',
            'name': 'obj5',
            'exception': 'oh noes!',
        })
        run_results.append({
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'round',
            'container': 'bucket1',
            'name': 'obj6',
            'size': 90,
        })
        for r in run_results:
            self.run_state.handle_run_result(r)

    def test_handle_initialization_result(self):
        """Only successful CREATE_OBJECT results land in objs_by_size."""
        self._fill_initial_results()
        assert_equal(
            self.run_state.objs_by_size, {
                'obtuse':
                deque([('bucket0', 'obj1', True), ('bucket1', 'obj1', True)]),
                'round':
                deque([('bucket0', 'obj2', True)]),
            })

    def test_handle_run_result(self):
        """Run-phase creates are appended flagged non-initial (False)."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(
            self.run_state.objs_by_size,
            {
                'obtuse':
                deque([('bucket0', 'obj1', True), ('bucket1', 'obj1', True),
                       ('bucket3', 'obj4', False)]),  # not "initial"
                'round':
                deque([('bucket0', 'obj2', True), ('bucket0', 'obj3', False),
                       ('bucket1', 'obj6', False)]),
            })

    def test_cleanup_object_infos(self):
        """cleanup_object_infos yields (and removes) only non-initial objects."""
        self._fill_initial_results()
        self._fill_run_results()
        cleanups = list(self.run_state.cleanup_object_infos())

        assert_equal(cleanups, [('bucket3', 'obj4', False),
                                ('bucket0', 'obj3', False),
                                ('bucket1', 'obj6', False)])
        assert_equal(
            self.run_state.objs_by_size,
            {
                'obtuse':
                deque([('bucket0', 'obj1', True),
                       ('bucket1', 'obj1', True)]),  # not "initial"
                'round':
                deque([('bucket0', 'obj2', True)]),
            })

    def test_cleanup_object_infos_with_no_initials(self):
        """Without initial objects, cleanup drains the deques completely."""
        self._fill_run_results()
        cleanups = set(self.run_state.cleanup_object_infos())

        assert_set_equal(
            cleanups,
            set([('bucket3', 'obj4', False), ('bucket0', 'obj3', False),
                 ('bucket1', 'obj6', False)]))
        # There were no initials, so there's nothing left:
        assert_equal(self.run_state.objs_by_size, {
            'obtuse': deque(),
            'round': deque(),
        })

    def test_fill_in_job_for_create_object(self):
        """CREATE_OBJECT jobs pass through unchanged and consume nothing."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(
            self.run_state.fill_in_job({
                'type': ssbench.CREATE_OBJECT,
                'creates': 'are ignored',
            }), {
                'type': ssbench.CREATE_OBJECT,
                'creates': 'are ignored',
            })
        assert_equal(
            self.run_state.objs_by_size, {
                'obtuse':
                deque([('bucket0', 'obj1', True), ('bucket1', 'obj1', True),
                       ('bucket3', 'obj4', False)]),
                'round':
                deque([('bucket0', 'obj2', True), ('bucket0', 'obj3', False),
                       ('bucket1', 'obj6', False)]),
            })

    def test_fill_in_job_for_delete_object(self):
        """DELETE_OBJECT pops the left-most object of the size class."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(
            self.run_state.fill_in_job({
                'type': ssbench.DELETE_OBJECT,
                'size_str': 'obtuse',
            }), {
                'type': ssbench.DELETE_OBJECT,
                'size_str': 'obtuse',
                'container': 'bucket0',
                'name': 'obj1',
            })
        assert_equal(
            self.run_state.objs_by_size, {
                'obtuse':
                deque([('bucket1', 'obj1', True), ('bucket3', 'obj4', False)]),
                'round':
                deque([('bucket0', 'obj2', True), ('bucket0', 'obj3', False),
                       ('bucket1', 'obj6', False)]),
            })

    def test_fill_in_job_for_update_object(self):
        """UPDATE_OBJECT takes the left-most object and rotates it to the end."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(
            self.run_state.fill_in_job({
                'type': ssbench.UPDATE_OBJECT,
                'size_str': 'round',
                'size': 991,
            }), {
                'type': ssbench.UPDATE_OBJECT,
                'size_str': 'round',
                'size': 991,
                'container': 'bucket0',
                'name': 'obj2',
            })
        assert_equal(
            self.run_state.objs_by_size,
            {
                'obtuse':
                deque([('bucket0', 'obj1', True), ('bucket1', 'obj1', True),
                       ('bucket3', 'obj4', False)]),
                'round':
                deque([
                    ('bucket0', 'obj3', False),  # (got rotated)
                    ('bucket1', 'obj6', False),
                    ('bucket0', 'obj2', True)
                ]),
            })

    def test_fill_in_job_for_read_object(self):
        """READ_OBJECT takes the left-most object and rotates it to the end."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(
            self.run_state.fill_in_job({
                'type': ssbench.READ_OBJECT,
                'size_str': 'obtuse',
            }), {
                'type': ssbench.READ_OBJECT,
                'size_str': 'obtuse',
                'container': 'bucket0',
                'name': 'obj1',
            })
        assert_equal(
            self.run_state.objs_by_size,
            {
                'obtuse':
                deque([
                    ('bucket1', 'obj1', True),  # (got rotated)
                    ('bucket3', 'obj4', False),
                    ('bucket0', 'obj1', True)
                ]),
                'round':
                deque([('bucket0', 'obj2', True), ('bucket0', 'obj3', False),
                       ('bucket1', 'obj6', False)]),
            })

    def test_fill_in_job_when_empty(self):
        """fill_in_job returns None when a size class has been exhausted."""
        self._fill_initial_results()
        assert_equal(
            self.run_state.fill_in_job({
                'type': ssbench.DELETE_OBJECT,
                'size_str': 'obtuse',
            }), {
                'type': ssbench.DELETE_OBJECT,
                'size_str': 'obtuse',
                'container': 'bucket0',
                'name': 'obj1',
            })
        assert_equal(
            self.run_state.fill_in_job({
                'type': ssbench.DELETE_OBJECT,
                'size_str': 'obtuse',
            }), {
                'type': ssbench.DELETE_OBJECT,
                'size_str': 'obtuse',
                'container': 'bucket1',
                'name': 'obj1',
            })
        assert_equal(
            self.run_state.fill_in_job({
                'type': ssbench.DELETE_OBJECT,
                'size_str': 'obtuse',
            }), None)
        assert_equal(
            self.run_state.fill_in_job({
                'type': ssbench.UPDATE_OBJECT,
                'size_str': 'obtuse',
                'size': 31,
            }), None)

        assert_equal(
            self.run_state.fill_in_job({
                'type': ssbench.DELETE_OBJECT,
                'size_str': 'round',
            }), {
                'type': ssbench.DELETE_OBJECT,
                'size_str': 'round',
                'container': 'bucket0',
                'name': 'obj2',
            })
        assert_equal(
            self.run_state.fill_in_job({
                'type': ssbench.DELETE_OBJECT,
                'size_str': 'round',
            }), None)
        assert_equal(
            self.run_state.fill_in_job({
                'type': ssbench.READ_OBJECT,
                'size_str': 'round',
            }), None)

        assert_equal(self.run_state.objs_by_size, {
            'obtuse': deque([]),
            'round': deque([]),
        })
Example 6
 def setUp(self):
     # Create a fresh RunState for each test so no bookkeeping state leaks
     # between test cases.
     self.run_state = RunState()
Example 7
class TestRunState(object):
    """Tests for RunState: tracking which objects exist per size class and
    handing their names back out to fill in later benchmark jobs."""

    def setUp(self):
        # Fresh RunState per test so no bookkeeping state leaks between
        # test cases (nose-style setUp).
        self.run_state = RunState()

    def _fill_initial_results(self):
        """Feed a canned set of initialization-phase results into run_state,
        keeping them on self for later comparison."""
        self.initial_results = [{
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'obtuse',
            'container': 'bucket0',
            'name': 'obj1',
            'size': 88,
        }]
        # non-CREATE_OBJECT results are saved but not added to the deque
        for t in [ssbench.READ_OBJECT, ssbench.UPDATE_OBJECT,
                  ssbench.DELETE_OBJECT]:
            self.initial_results.append({
                'type': t,
                'size_str': 'obtuse',
                'container': 'bucket0',
                'name': 'obj1',
            })
        self.initial_results.append({
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'obtuse',
            'container': 'bucket1',
            'name': 'obj1',
            'size': 89,
        })
        # exception results are saved but not added to the deque
        self.initial_results.append({
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'obtuse',
            'container': 'bucket0',
            'name': 'obj1',
            'exception': 'oh noes!',
        })
        self.initial_results.append({
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'round',
            'container': 'bucket0',
            'name': 'obj2',
            'size': 90,
        })
        for r in self.initial_results:
            self.run_state.handle_initialization_result(r)

    def _fill_run_results(self):
        """Feed a canned set of benchmark-run results into run_state,
        keeping them on self for later comparison."""
        self.run_results = [{
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'round',
            'container': 'bucket0',
            'name': 'obj3',
            'size': 77,
        }]
        # non-CREATE_OBJECT results are saved but not added to the deque
        for t in [ssbench.READ_OBJECT, ssbench.UPDATE_OBJECT,
                  ssbench.DELETE_OBJECT]:
            self.run_results.append({
                'type': t,
                'size_str': 'obtuse',
                'container': 'bucket0',
                'name': 'obj8',
            })
        self.run_results.append({
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'obtuse',
            'container': 'bucket3',
            'name': 'obj4',
            'size': 89,
        })
        # exception results are saved but not added to the deque
        self.run_results.append({
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'obtuse',
            'container': 'bucket3',
            'name': 'obj5',
            'exception': 'oh noes!',
        })
        self.run_results.append({
            'type': ssbench.CREATE_OBJECT,
            'size_str': 'round',
            'container': 'bucket1',
            'name': 'obj6',
            'size': 90,
        })
        for r in self.run_results:
            self.run_state.handle_run_result(r)

    def test_handle_initialization_result(self):
        """All results are recorded; only successful CREATE_OBJECT results
        land in objs_by_size."""
        self._fill_initial_results()
        assert_equal(self.run_state.initialization_results,
                     self.initial_results)
        assert_equal(self.run_state.objs_by_size, {
            'obtuse': deque([
                ('bucket0', 'obj1', True),
                ('bucket1', 'obj1', True)]),
            'round': deque([('bucket0', 'obj2', True)]),
        })

    def test_handle_run_result(self):
        """Run-phase creates are appended flagged non-initial (False)."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(self.run_state.run_results,
                     self.run_results)
        assert_equal(self.run_state.objs_by_size, {
            'obtuse': deque([
                ('bucket0', 'obj1', True),
                ('bucket1', 'obj1', True),
                ('bucket3', 'obj4', False)]),  # not "initial"
            'round': deque([
                ('bucket0', 'obj2', True),
                ('bucket0', 'obj3', False),
                ('bucket1', 'obj6', False)]),
        })

    def test_cleanup_object_infos(self):
        """cleanup_object_infos yields (and removes) only non-initial objects."""
        self._fill_initial_results()
        self._fill_run_results()
        cleanups = list(self.run_state.cleanup_object_infos())

        assert_equal(cleanups, [('bucket3', 'obj4', False),
                                ('bucket0', 'obj3', False),
                                ('bucket1', 'obj6', False)])
        assert_equal(self.run_state.objs_by_size, {
            'obtuse': deque([
                ('bucket0', 'obj1', True),
                ('bucket1', 'obj1', True)]),  # not "initial"
            'round': deque([
                ('bucket0', 'obj2', True)]),
        })

    def test_fill_in_job_for_create_object(self):
        """CREATE_OBJECT jobs pass through unchanged and consume nothing."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(self.run_state.fill_in_job({
            'type': ssbench.CREATE_OBJECT,
            'creates': 'are ignored',
        }), {
            'type': ssbench.CREATE_OBJECT,
            'creates': 'are ignored',
        })
        assert_equal(self.run_state.objs_by_size, {
            'obtuse': deque([
                ('bucket0', 'obj1', True),
                ('bucket1', 'obj1', True),
                ('bucket3', 'obj4', False)]),
            'round': deque([
                ('bucket0', 'obj2', True),
                ('bucket0', 'obj3', False),
                ('bucket1', 'obj6', False)]),
        })

    def test_fill_in_job_for_delete_object(self):
        """DELETE_OBJECT pops the left-most object of the size class."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(self.run_state.fill_in_job({
            'type': ssbench.DELETE_OBJECT,
            'size_str': 'obtuse',
        }), {
            'type': ssbench.DELETE_OBJECT,
            'size_str': 'obtuse',
            'container': 'bucket0',
            'name': 'obj1',
        })
        assert_equal(self.run_state.objs_by_size, {
            'obtuse': deque([
                ('bucket1', 'obj1', True),
                ('bucket3', 'obj4', False)]),
            'round': deque([
                ('bucket0', 'obj2', True),
                ('bucket0', 'obj3', False),
                ('bucket1', 'obj6', False)]),
        })

    def test_fill_in_job_for_update_object(self):
        """UPDATE_OBJECT takes the left-most object and rotates it to the end."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(self.run_state.fill_in_job({
            'type': ssbench.UPDATE_OBJECT,
            'size_str': 'round',
            'size': 991,
        }), {
            'type': ssbench.UPDATE_OBJECT,
            'size_str': 'round',
            'size': 991,
            'container': 'bucket0',
            'name': 'obj2',
        })
        assert_equal(self.run_state.objs_by_size, {
            'obtuse': deque([
                ('bucket0', 'obj1', True),
                ('bucket1', 'obj1', True),
                ('bucket3', 'obj4', False)]),
            'round': deque([
                ('bucket0', 'obj3', False),  # (got rotated)
                ('bucket1', 'obj6', False),
                ('bucket0', 'obj2', True)]),
        })

    def test_fill_in_job_for_read_object(self):
        """READ_OBJECT takes the left-most object and rotates it to the end."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(self.run_state.fill_in_job({
            'type': ssbench.READ_OBJECT,
            'size_str': 'obtuse',
        }), {
            'type': ssbench.READ_OBJECT,
            'size_str': 'obtuse',
            'container': 'bucket0',
            'name': 'obj1',
        })
        assert_equal(self.run_state.objs_by_size, {
            'obtuse': deque([
                ('bucket1', 'obj1', True),  # (got rotated)
                ('bucket3', 'obj4', False),
                ('bucket0', 'obj1', True)]),
            'round': deque([
                ('bucket0', 'obj2', True),
                ('bucket0', 'obj3', False),
                ('bucket1', 'obj6', False)]),
        })

    def test_fill_in_job_when_empty(self):
        """fill_in_job returns None when a size class has been exhausted."""
        self._fill_initial_results()
        assert_equal(self.run_state.fill_in_job({
            'type': ssbench.DELETE_OBJECT,
            'size_str': 'obtuse',
        }), {
            'type': ssbench.DELETE_OBJECT,
            'size_str': 'obtuse',
            'container': 'bucket0',
            'name': 'obj1',
        })
        assert_equal(self.run_state.fill_in_job({
            'type': ssbench.DELETE_OBJECT,
            'size_str': 'obtuse',
        }), {
            'type': ssbench.DELETE_OBJECT,
            'size_str': 'obtuse',
            'container': 'bucket1',
            'name': 'obj1',
        })
        assert_equal(self.run_state.fill_in_job({
            'type': ssbench.DELETE_OBJECT,
            'size_str': 'obtuse',
        }), None)
        assert_equal(self.run_state.fill_in_job({
            'type': ssbench.UPDATE_OBJECT,
            'size_str': 'obtuse',
            'size': 31,
        }), None)

        assert_equal(self.run_state.fill_in_job({
            'type': ssbench.DELETE_OBJECT,
            'size_str': 'round',
        }), {
            'type': ssbench.DELETE_OBJECT,
            'size_str': 'round',
            'container': 'bucket0',
            'name': 'obj2',
        })
        assert_equal(self.run_state.fill_in_job({
            'type': ssbench.DELETE_OBJECT,
            'size_str': 'round',
        }), None)
        assert_equal(self.run_state.fill_in_job({
            'type': ssbench.READ_OBJECT,
            'size_str': 'round',
        }), None)

        assert_equal(self.run_state.objs_by_size, {
            'obtuse': deque([]),
            'round': deque([]),
        })
Example 8
class TestRunState(object):
    """Tests for RunState's bookkeeping of benchmark objects.

    RunState tracks created objects per size class in ``objs_by_size``:
    deques of ``(container, name, is_initial)`` tuples, which later
    READ/UPDATE/DELETE jobs are "filled in" from via ``fill_in_job``.
    """

    def setUp(self):
        # Fresh state per test; populated via the _fill_* helpers below.
        self.run_state = RunState()

    def _fill_initial_results(self):
        """Feed a canned batch of initialization-phase results to run_state.

        Leaves two trackable 'obtuse' objects and one 'round' object
        (all flagged initial=True).  Non-CREATE_OBJECT and exception
        results are submitted too, but must not enter objs_by_size.
        """
        initial_results = [
            {"type": ssbench.CREATE_OBJECT, "size_str": "obtuse", "container": "bucket0", "name": "obj1", "size": 88}
        ]
        # non-CREATE_OBJECT results are saved but not added to the deque
        for t in [ssbench.READ_OBJECT, ssbench.UPDATE_OBJECT, ssbench.DELETE_OBJECT]:
            initial_results.append({"type": t, "size_str": "obtuse", "container": "bucket0", "name": "obj1"})
        initial_results.append(
            {"type": ssbench.CREATE_OBJECT, "size_str": "obtuse", "container": "bucket1", "name": "obj1", "size": 89}
        )
        # exception results are saved but not added to the deque
        initial_results.append(
            {
                "type": ssbench.CREATE_OBJECT,
                "size_str": "obtuse",
                "container": "bucket0",
                "name": "obj1",
                "exception": "oh noes!",
            }
        )
        initial_results.append(
            {"type": ssbench.CREATE_OBJECT, "size_str": "round", "container": "bucket0", "name": "obj2", "size": 90}
        )
        for r in initial_results:
            self.run_state.handle_initialization_result(r)

    def _fill_run_results(self):
        """Feed a canned batch of benchmark-phase results to run_state.

        Adds one trackable 'round' and one 'obtuse' object (flagged
        non-initial).  Non-CREATE_OBJECT and exception results are
        submitted too, but must not enter objs_by_size.
        """
        run_results = [
            {"type": ssbench.CREATE_OBJECT, "size_str": "round", "container": "bucket0", "name": "obj3", "size": 77}
        ]
        # non-CREATE_OBJECT results are saved but not added to the deque
        for t in [ssbench.READ_OBJECT, ssbench.UPDATE_OBJECT, ssbench.DELETE_OBJECT]:
            run_results.append({"type": t, "size_str": "obtuse", "container": "bucket0", "name": "obj8"})
        run_results.append(
            {"type": ssbench.CREATE_OBJECT, "size_str": "obtuse", "container": "bucket3", "name": "obj4", "size": 89}
        )
        # exception results are saved but not added to the deque
        run_results.append(
            {
                "type": ssbench.CREATE_OBJECT,
                "size_str": "obtuse",
                "container": "bucket3",
                "name": "obj5",
                "exception": "oh noes!",
            }
        )
        run_results.append(
            {"type": ssbench.CREATE_OBJECT, "size_str": "round", "container": "bucket1", "name": "obj6", "size": 90}
        )
        for r in run_results:
            self.run_state.handle_run_result(r)

    def test_handle_initialization_result(self):
        """Only successful CREATE_OBJECT results are tracked, flagged initial."""
        self._fill_initial_results()
        assert_equal(
            self.run_state.objs_by_size,
            {
                "obtuse": deque([("bucket0", "obj1", True), ("bucket1", "obj1", True)]),
                "round": deque([("bucket0", "obj2", True)]),
            },
        )

    def test_handle_run_result(self):
        """Run-phase creates are appended after initials, flagged non-initial."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(
            self.run_state.objs_by_size,
            {
                "obtuse": deque(
                    [("bucket0", "obj1", True), ("bucket1", "obj1", True), ("bucket3", "obj4", False)]
                ),  # not "initial"
                "round": deque([("bucket0", "obj2", True), ("bucket0", "obj3", False), ("bucket1", "obj6", False)]),
            },
        )

    def test_cleanup_object_infos(self):
        """cleanup_object_infos yields and removes only non-initial objects."""
        self._fill_initial_results()
        self._fill_run_results()
        cleanups = list(self.run_state.cleanup_object_infos())

        assert_equal(cleanups, [("bucket3", "obj4", False), ("bucket0", "obj3", False), ("bucket1", "obj6", False)])
        # Initial objects survive the cleanup pass.
        assert_equal(
            self.run_state.objs_by_size,
            {
                "obtuse": deque([("bucket0", "obj1", True), ("bucket1", "obj1", True)]),  # not "initial"
                "round": deque([("bucket0", "obj2", True)]),
            },
        )

    def test_cleanup_object_infos_with_no_initials(self):
        """With no initial objects, cleanup drains objs_by_size completely."""
        self._fill_run_results()
        cleanups = set(self.run_state.cleanup_object_infos())

        assert_set_equal(
            cleanups, set([("bucket3", "obj4", False), ("bucket0", "obj3", False), ("bucket1", "obj6", False)])
        )
        # There were no initials, so there's nothing left:
        assert_equal(self.run_state.objs_by_size, {"obtuse": deque(), "round": deque()})

    def test_fill_in_job_for_create_object(self):
        """CREATE_OBJECT jobs pass through untouched and leave state alone."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(
            self.run_state.fill_in_job({"type": ssbench.CREATE_OBJECT, "creates": "are ignored"}),
            {"type": ssbench.CREATE_OBJECT, "creates": "are ignored"},
        )
        assert_equal(
            self.run_state.objs_by_size,
            {
                "obtuse": deque([("bucket0", "obj1", True), ("bucket1", "obj1", True), ("bucket3", "obj4", False)]),
                "round": deque([("bucket0", "obj2", True), ("bucket0", "obj3", False), ("bucket1", "obj6", False)]),
            },
        )

    def test_fill_in_job_for_delete_object(self):
        """DELETE_OBJECT jobs consume the oldest object of the size class."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(
            self.run_state.fill_in_job({"type": ssbench.DELETE_OBJECT, "size_str": "obtuse"}),
            {"type": ssbench.DELETE_OBJECT, "size_str": "obtuse", "container": "bucket0", "name": "obj1"},
        )
        assert_equal(
            self.run_state.objs_by_size,
            {
                "obtuse": deque([("bucket1", "obj1", True), ("bucket3", "obj4", False)]),
                "round": deque([("bucket0", "obj2", True), ("bucket0", "obj3", False), ("bucket1", "obj6", False)]),
            },
        )

    def test_fill_in_job_for_update_object(self):
        """UPDATE_OBJECT jobs target the oldest object and rotate it to the back."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(
            self.run_state.fill_in_job({"type": ssbench.UPDATE_OBJECT, "size_str": "round", "size": 991}),
            {"type": ssbench.UPDATE_OBJECT, "size_str": "round", "size": 991, "container": "bucket0", "name": "obj2"},
        )
        assert_equal(
            self.run_state.objs_by_size,
            {
                "obtuse": deque([("bucket0", "obj1", True), ("bucket1", "obj1", True), ("bucket3", "obj4", False)]),
                "round": deque(
                    [("bucket0", "obj3", False), ("bucket1", "obj6", False), ("bucket0", "obj2", True)]  # (got rotated)
                ),
            },
        )

    def test_fill_in_job_for_read_object(self):
        """READ_OBJECT jobs target the oldest object and rotate it to the back."""
        self._fill_initial_results()
        self._fill_run_results()
        assert_equal(
            self.run_state.fill_in_job({"type": ssbench.READ_OBJECT, "size_str": "obtuse"}),
            {"type": ssbench.READ_OBJECT, "size_str": "obtuse", "container": "bucket0", "name": "obj1"},
        )
        assert_equal(
            self.run_state.objs_by_size,
            {
                "obtuse": deque(
                    [("bucket1", "obj1", True), ("bucket3", "obj4", False), ("bucket0", "obj1", True)]  # (got rotated)
                ),
                "round": deque([("bucket0", "obj2", True), ("bucket0", "obj3", False), ("bucket1", "obj6", False)]),
            },
        )

    def test_fill_in_job_when_empty(self):
        """Once a size class is drained, fill_in_job returns None for it."""
        self._fill_initial_results()
        assert_equal(
            self.run_state.fill_in_job({"type": ssbench.DELETE_OBJECT, "size_str": "obtuse"}),
            {"type": ssbench.DELETE_OBJECT, "size_str": "obtuse", "container": "bucket0", "name": "obj1"},
        )
        assert_equal(
            self.run_state.fill_in_job({"type": ssbench.DELETE_OBJECT, "size_str": "obtuse"}),
            {"type": ssbench.DELETE_OBJECT, "size_str": "obtuse", "container": "bucket1", "name": "obj1"},
        )
        assert_equal(self.run_state.fill_in_job({"type": ssbench.DELETE_OBJECT, "size_str": "obtuse"}), None)
        assert_equal(
            self.run_state.fill_in_job({"type": ssbench.UPDATE_OBJECT, "size_str": "obtuse", "size": 31}), None
        )

        assert_equal(
            self.run_state.fill_in_job({"type": ssbench.DELETE_OBJECT, "size_str": "round"}),
            {"type": ssbench.DELETE_OBJECT, "size_str": "round", "container": "bucket0", "name": "obj2"},
        )
        assert_equal(self.run_state.fill_in_job({"type": ssbench.DELETE_OBJECT, "size_str": "round"}), None)
        assert_equal(self.run_state.fill_in_job({"type": ssbench.READ_OBJECT, "size_str": "round"}), None)

        assert_equal(self.run_state.objs_by_size, {"obtuse": deque([]), "round": deque([])})
Esempio n. 9
0
    def run_scenario(self, auth_url, user, key, storage_url, token, scenario):
        """
        Runs a CRUD scenario, given cluster parameters and a Scenario object.

        :auth_url: Authentication URL for the Swift cluster
        :user: Account/Username to use (format is <account>:<username>)
        :key: Password for the Account/Username
        :storage_url: Optional pre-authenticated storage URL; used (with
                      token) to skip the get_auth call when both are given
        :token: Optional pre-authenticated auth token to pair with
                storage_url
        :scenario: Scenario object describing the benchmark run
        :returns: Collected result records from workers
        """

        run_state = RunState()

        # Discard any stale stats left over from a previous run.
        self.drain_stats_queue()
        # Authenticate only when a full storage_url/token pair wasn't given.
        if not storage_url or not token:
            logging.debug("Authenticating to %s with %s/%s", auth_url, user, key)
            storage_url, token = client.get_auth(auth_url, user, key)
        else:
            logging.debug("Using token %s at %s", token, storage_url)

        logging.info(u'Starting scenario run for "%s"', scenario.name)

        # Ensure containers exist
        logging.info(
            "Ensuring %d containers (%s_*) exist; concurrency=%d...",
            len(scenario.containers),
            scenario.container_base,
            scenario.container_concurrency,
        )
        pool = eventlet.GreenPool(scenario.container_concurrency)
        for container in scenario.containers:
            pool.spawn_n(_container_creator, storage_url, token, container)
        pool.waitall()

        # Direct subsequent job submissions at the workers' tube.
        self.queue.use(ssbench.WORK_TUBE)

        # Enqueue initialization jobs
        logging.info("Initializing cluster with stock data (up to %d " "concurrent workers)", scenario.user_count)

        # Phase 1: populate the cluster; results feed run_state's object
        # tracking via handle_initialization_result.
        self.do_a_run(
            scenario.user_count,
            scenario.initial_jobs(),
            run_state.handle_initialization_result,
            ssbench.PRIORITY_SETUP,
            storage_url,
            token,
        )

        logging.info("Starting benchmark run (up to %d concurrent " "workers)", scenario.user_count)
        # Phase 2: the benchmark proper; fill_in_job assigns each job a
        # concrete target object from run_state's tracking.
        self.do_a_run(
            scenario.user_count,
            scenario.bench_jobs(),
            run_state.handle_run_result,
            ssbench.PRIORITY_WORK,
            storage_url,
            token,
            mapper_fn=run_state.fill_in_job,
            label="Benchmark Run:",
        )

        logging.info("Deleting population objects from cluster")
        # Phase 3: cleanup; results are ignored (lambda swallows them).
        self.do_a_run(
            scenario.user_count,
            run_state.cleanup_object_infos(),
            lambda *_: None,
            ssbench.PRIORITY_CLEANUP,
            storage_url,
            token,
            mapper_fn=_gen_cleanup_job,
        )

        return run_state.run_results