Example #1
0
    def cleanup_containers(self, auth_kwargs, container_base, concurrency):
        """Delete all benchmark containers (and their objects) whose names
        match ``<container_base>_<digits>``.

        :param auth_kwargs: auth settings passed to self._authenticate()
        :param container_base: name prefix identifying ssbench containers
        :param concurrency: max number of containers deleted concurrently
        """
        storage_urls, token = self._authenticate(auth_kwargs)

        _, container_list = client.get_account(
            random.choice(storage_urls), token)

        # Raw string: '\d' in a non-raw literal is an invalid escape
        # sequence (DeprecationWarning, and a SyntaxWarning in newer
        # Pythons).
        our_container_re = re.compile(r'%s_\d+$' % container_base)

        start_time = time.time()
        obj_count = 0
        container_count = 0
        pool = gevent.pool.Pool(concurrency)
        for container_info in container_list:
            # e.g. {'count': 41, 'bytes': 496485, 'name': 'doc'}
            if our_container_re.match(container_info['name']):
                pool.spawn(_container_deleter, concurrency, storage_urls,
                           token, container_info)
                container_count += 1
                obj_count += container_info['count']
            else:
                logging.debug('Ignoring non-ssbench container %r',
                              container_info['name'])
        pool.join()
        delta_t = time.time() - start_time
        logging.info('Deleted %.1f containers/s, %.1f objs/s',
                     container_count / delta_t, obj_count / delta_t)
Example #2
0
    def cleanup_containers(self, auth_kwargs, container_base, concurrency,
                           policy):
        """Delete every ssbench container (and its objects) whose name
        matches self.DELETER_RE for the given base name and storage policy.
        """
        storage_urls, token = self._authenticate(auth_kwargs)

        _, containers = client.get_account(random.choice(storage_urls),
                                           token)

        matcher = re.compile(self.DELETER_RE % (container_base, policy))

        started = time.time()
        deleted_objs = 0
        deleted_containers = 0
        pool = gevent.pool.Pool(concurrency)
        for info in containers:
            # Account-listing entries look like:
            # {'count': 41, 'bytes': 496485, 'name': 'doc'}
            name = info['name']
            if not matcher.match(name):
                logging.debug('Ignoring non-ssbench container %r', name)
                continue
            pool.spawn(_container_deleter, concurrency, storage_urls,
                       token, info)
            deleted_containers += 1
            deleted_objs += info['count']
        pool.join()
        elapsed = time.time() - started
        logging.info('Deleted %.1f containers/s, %.1f objs/s',
                     deleted_containers / elapsed, deleted_objs / elapsed)
Example #3
0
def _container_creator(storage_urls, token, container):
    """Ensure *container* exists: probe it with a HEAD and create it only
    when the probe fails (EAFP)."""
    url = random.choice(storage_urls)
    conn = client.http_connection(url)
    try:
        client.head_container(url, token, container, http_conn=conn)
    except client.ClientException:
        # HEAD raised, so the container is missing -- create it.
        client.put_container(url, token, container, http_conn=conn)
Example #4
0
def _container_deleter(concurrency, storage_urls, token, container_info):
    """Delete every object in the container described by *container_info*,
    then delete the container itself."""
    name = container_info['name']
    logging.info('deleting %r (%d objs)', name,
                 container_info['count'])
    # NOTE(review): the connection is bound to one randomly chosen URL
    # while each call's url argument is re-drawn at random; presumably the
    # client ignores the url when http_conn is given -- confirm.
    conn_url = random.choice(storage_urls)
    conn = client.http_connection(conn_url)
    _, objects = client.get_container(random.choice(storage_urls), token,
                                      name, http_conn=conn)

    # Fan out object deletions, bounded by the requested concurrency.
    workers = gevent.pool.Pool(concurrency)
    for obj in objects:
        workers.spawn(client.delete_object, random.choice(storage_urls),
                      token, name, obj['name'])
    workers.join()

    client.delete_container(random.choice(storage_urls), token, name,
                            http_conn=conn)
Example #5
0
def _container_creator(storage_urls, token, container, policy=None):
    """Create *container* if it does not already exist, optionally pinning
    it to a storage *policy* via the x-storage-policy header."""
    headers = {'x-storage-policy': policy} if policy is not None else None
    url = random.choice(storage_urls)
    conn = client.http_connection(url)
    try:
        client.head_container(url, token, container, http_conn=conn)
    except client.ClientException:
        # HEAD raised, so the container is missing -- create it.
        client.put_container(url, token, container, headers=headers,
                             http_conn=conn)
Example #6
0
def _container_deleter(concurrency, storage_urls, token, container_info):
    """Empty out and then remove the container named in *container_info*."""
    container_name = container_info['name']
    logging.info('deleting %r (%d objs)', container_name,
                 container_info['count'])
    base_url = random.choice(storage_urls)
    conn = client.http_connection(base_url)
    _, listing = client.get_container(random.choice(storage_urls), token,
                                      container_name, http_conn=conn)

    # Delete the contained objects in parallel before the container.
    deleters = gevent.pool.Pool(concurrency)
    for entry in listing:
        deleters.spawn(client.delete_object, random.choice(storage_urls),
                       token, container_name, entry['name'])
    deleters.join()

    client.delete_container(random.choice(storage_urls), token,
                            container_name, http_conn=conn)
Example #7
0
def _container_creator(storage_urls, token, container):
    """Create *container* on a randomly chosen storage URL unless a HEAD
    request shows it already exists."""
    target = random.choice(storage_urls)
    conn = client.http_connection(target)
    try:
        client.head_container(target, token, container, http_conn=conn)
    except client.ClientException:
        # Container not there yet -- create it over the same connection.
        client.put_container(target, token, container, http_conn=conn)
Example #8
0
def _container_creator(storage_urls, token, container, policy=None):
    """Create *container* if absent; when *policy* is given, request that
    storage policy through the x-storage-policy header."""
    if policy is None:
        extra_headers = None
    else:
        extra_headers = {'x-storage-policy': policy}
    target = random.choice(storage_urls)
    conn = client.http_connection(target)
    try:
        client.head_container(target, token, container, http_conn=conn)
    except client.ClientException:
        # HEAD failed, so the container does not exist -- create it.
        client.put_container(target, token, container,
                             headers=extra_headers, http_conn=conn)
Example #9
0
    def create_job(self, size_str, i, container=None, head_first=False):
        """Build a CREATE_OBJECT job dict for size class *size_str*.

        The object name encodes the size class and index *i*; the object
        size is drawn uniformly from the configured size_min/size_max
        range for that class.  When *container* is None, one of
        self.containers is picked at random.
        """
        if container is None:
            container = random.choice(self.containers)

        size_conf = self.sizes_by_name[size_str]
        return self.job(
            size_str,
            type=ssbench.CREATE_OBJECT,
            container=container,
            name='%s_%06d' % (size_str, i),
            size=random.randint(size_conf['size_min'],
                                size_conf['size_max']),
            block_size=self.block_size,
            head_first=head_first,
            delete_after=self.delete_after)
Example #10
0
    def create_job(self, size_str, i, container=None, head_first=False):
        """Return a job dict that will create one object.

        The object belongs to size class *size_str* and is named after
        that class plus the zero-padded index *i*; its size is a uniform
        random draw from the class's configured min/max.
        """
        target_container = container
        if target_container is None:
            target_container = random.choice(self.containers)

        bounds = self.sizes_by_name[size_str]
        object_size = random.randint(bounds['size_min'], bounds['size_max'])
        return self.job(size_str,
                        type=ssbench.CREATE_OBJECT,
                        container=target_container,
                        name='%s_%06d' % (size_str, i),
                        size=object_size,
                        block_size=self.block_size,
                        head_first=head_first,
                        delete_after=self.delete_after)
Example #11
0
    def ignoring_http_responses(self, statuses, fn, call_info, **extra_keys):
        """Call *fn*, retrying (up to self.max_retries times) on any
        client.ClientException whose HTTP status is in *statuses*, on 401
        (which also evicts the cached token so the next attempt
        re-authenticates), and on socket errors.

        :param statuses: tuple of HTTP status codes to retry on
        :param fn: swift-client-style callable taking url / token /
            container / name / http_conn keyword arguments and returning
            a truthy results dict on success
        :param call_info: benchmark job dict; must contain 'auth_kwargs',
            and supplies 'container', 'name', and optional timeout keys
        :param extra_keys: extra keyword arguments merged into the call
        :returns: fn's results dict, with a 'retries' count added
        :raises ValueError: if call_info lacks 'auth_kwargs'
        :raises Exception: if fn never produces results within max_retries
        """
        # 401 must always be retryable, since it drives re-auth below.
        if 401 not in statuses:
            statuses += (401, )
        args = dict(
            container=call_info['container'],
            name=call_info['name'],
        )
        args.update(extra_keys)

        if 'auth_kwargs' not in call_info:
            raise ValueError('Got benchmark job without "auth_kwargs" key!')

        tries = 0
        while True:
            # Make sure we've got a current storage_url/token
            if call_info['auth_kwargs'].get('token', None):
                # Caller supplied a fixed token; no cache key is needed.
                token_key = None
                args['url'] = random.choice(
                    call_info['auth_kwargs']['storage_urls'])
                args['token'] = call_info['auth_kwargs']['token']
            else:
                token_key = self._token_key(call_info['auth_kwargs'])
                if token_key not in self.token_data:
                    # Double-checked locking on the shared token cache:
                    # membership is re-tested after acquiring the lock.
                    self.token_data_lock.acquire()
                    collided = False
                    try:
                        if token_key not in self.token_data:
                            logging.debug('Authenticating with %r',
                                          call_info['auth_kwargs'])
                            storage_url, token = client.get_auth(
                                **call_info['auth_kwargs'])
                            override_urls = call_info['auth_kwargs'].get(
                                'storage_urls', None)
                            if override_urls:
                                logging.debug(
                                    'Will override auth storage url %s with '
                                    'one of %r', storage_url, override_urls)
                                storage_urls = override_urls
                            else:
                                storage_urls = [storage_url]
                            self.token_data[token_key] = (storage_urls, token)
                        else:
                            collided = True
                    finally:
                        self.token_data_lock.release()
                    if collided:
                        # Wait just a little bit if we just collided with
                        # another greenthread's re-auth
                        logging.debug('Collided on re-auth; sleeping 0.005')
                        gevent.sleep(0.005)
                storage_urls, args['token'] = self.token_data[token_key]
                args['url'] = random.choice(storage_urls)

            # Check for connection pool initialization (protected by a
            # semaphore)
            if args['url'] not in self.conn_pools:
                self._create_connection_pool(
                    args['url'],
                    call_info.get('connect_timeout',
                                  client.DEFAULT_CONNECT_TIMEOUT),
                    call_info.get('network_timeout',
                                  client.DEFAULT_NETWORK_TIMEOUT))

            try:
                fn_results = None
                with self.connection(args['url']) as conn:
                    fn_results = fn(http_conn=conn, **args)
                if fn_results:
                    if tries != 0:
                        logging.info('%r succeeded after %d tries', call_info,
                                     tries)
                    break
                # Falsy results count as a failed attempt too.
                tries += 1
                if tries > self.max_retries:
                    # NOTE: the 'retires' typo is in the original message.
                    e = Exception('No fn_results for %r after %d retires' %
                                  (fn, self.max_retries))
                    e.retries = tries - 1
                    raise e
            # XXX The name of this method does not suggest that it
            # will also retry on socket-level errors. Regardless,
            # sometimes Swift refuses connections (probably when it's
            # way overloaded and the listen socket's connection queue
            # (in the kernel) is full, so the kernel just says RST).
            #
            # UPDATE: connections should be handled by the ConnectionPool
            # (which will trap socket.error and retry after a slight delay), so
            # socket.error should NOT get raised here for connection failures.
            # So hopefully this socket.error trapping code path will not get
            # hit.
            except socket.error as error:
                tries += 1
                if tries > self.max_retries:
                    error.retries = tries - 1
                    raise error
            except client.ClientException as error:
                tries += 1
                if error.http_status in statuses and \
                        tries <= self.max_retries:
                    # On 401, evict the cached token so the next loop
                    # iteration re-authenticates -- but only if the cache
                    # still holds the exact token we just used (another
                    # greenthread may already have refreshed it).
                    if error.http_status == 401 and token_key:
                        if token_key in self.token_data and \
                                self.token_data[token_key][1] == args['token']:
                            self.token_data_lock.acquire()
                            try:
                                # Re-check under the lock before deleting.
                                if token_key in self.token_data and \
                                        self.token_data[token_key][1] == \
                                        args['token']:
                                    logging.debug(
                                        'Deleting token %s',
                                        self.token_data[token_key][1])
                                    del self.token_data[token_key]
                            finally:
                                self.token_data_lock.release()
                    logging.debug("Retrying an error: %r", error)
                else:
                    error.retries = tries - 1
                    raise error
        fn_results['retries'] = tries
        return fn_results
Example #12
0
    def ignoring_http_responses(self, statuses, fn, call_info, **extra_keys):
        """Call *fn*, retrying (up to self.max_retries times) on any
        client.ClientException whose HTTP status is in *statuses*, on 401
        (which also evicts the cached token so the next attempt
        re-authenticates), and on socket errors.

        :param statuses: tuple of HTTP status codes to retry on
        :param fn: swift-client-style callable taking url / token /
            container / name / http_conn keyword arguments and returning
            a truthy results dict on success
        :param call_info: benchmark job dict; must contain 'auth_kwargs',
            and supplies 'container', 'name', and optional timeout keys
        :param extra_keys: extra keyword arguments merged into the call
        :returns: fn's results dict, with a 'retries' count added
        :raises ValueError: if call_info lacks 'auth_kwargs'
        :raises Exception: if fn never produces results within max_retries
        """
        # 401 must always be retryable, since it drives re-auth below.
        if 401 not in statuses:
            statuses += (401,)
        args = dict(
            container=call_info['container'],
            name=call_info['name'],
        )
        args.update(extra_keys)

        if 'auth_kwargs' not in call_info:
            raise ValueError('Got benchmark job without "auth_kwargs" key!')

        tries = 0
        while True:
            # Make sure we've got a current storage_url/token
            if call_info['auth_kwargs'].get('token', None):
                # Caller supplied a fixed token; no cache key is needed.
                token_key = None
                args['url'] = random.choice(
                    call_info['auth_kwargs']['storage_urls'])
                args['token'] = call_info['auth_kwargs']['token']
            else:
                token_key = self._token_key(call_info['auth_kwargs'])
                if token_key not in self.token_data:
                    # Double-checked locking on the shared token cache:
                    # membership is re-tested after acquiring the lock.
                    self.token_data_lock.acquire()
                    collided = False
                    try:
                        if token_key not in self.token_data:
                            logging.debug('Authenticating with %r',
                                          call_info['auth_kwargs'])
                            storage_url, token = client.get_auth(
                                **call_info['auth_kwargs'])
                            override_urls = call_info['auth_kwargs'].get(
                                'storage_urls', None)
                            if override_urls:
                                logging.debug(
                                    'Will override auth storage url %s with '
                                    'one of %r', storage_url, override_urls)
                                storage_urls = override_urls
                            else:
                                storage_urls = [storage_url]
                            self.token_data[token_key] = (storage_urls, token)
                        else:
                            collided = True
                    finally:
                        self.token_data_lock.release()
                    if collided:
                        # Wait just a little bit if we just collided with
                        # another greenthread's re-auth
                        logging.debug('Collided on re-auth; sleeping 0.005')
                        gevent.sleep(0.005)
                storage_urls, args['token'] = self.token_data[token_key]
                args['url'] = random.choice(storage_urls)

            # Check for connection pool initialization (protected by a
            # semaphore)
            if args['url'] not in self.conn_pools:
                self._create_connection_pool(
                    args['url'],
                    call_info.get('connect_timeout',
                                  client.DEFAULT_CONNECT_TIMEOUT),
                    call_info.get('network_timeout',
                                  client.DEFAULT_NETWORK_TIMEOUT))

            try:
                fn_results = None
                with self.connection(args['url']) as conn:
                    fn_results = fn(http_conn=conn, **args)
                if fn_results:
                    if tries != 0:
                        logging.info('%r succeeded after %d tries',
                                     call_info, tries)
                    break
                # Falsy results count as a failed attempt too.
                tries += 1
                if tries > self.max_retries:
                    # NOTE: the 'retires' typo is in the original message.
                    e = Exception('No fn_results for %r after %d retires' % (
                        fn, self.max_retries))
                    e.retries = tries - 1
                    raise e
            # XXX The name of this method does not suggest that it
            # will also retry on socket-level errors. Regardless,
            # sometimes Swift refuses connections (probably when it's
            # way overloaded and the listen socket's connection queue
            # (in the kernel) is full, so the kernel just says RST).
            #
            # UPDATE: connections should be handled by the ConnectionPool
            # (which will trap socket.error and retry after a slight delay), so
            # socket.error should NOT get raised here for connection failures.
            # So hopefully this socket.error trapping code path will not get
            # hit.
            except socket.error as error:
                tries += 1
                if tries > self.max_retries:
                    error.retries = tries - 1
                    raise error
            except client.ClientException as error:
                tries += 1
                if error.http_status in statuses and \
                        tries <= self.max_retries:
                    # On 401, evict the cached token so the next loop
                    # iteration re-authenticates -- but only if the cache
                    # still holds the exact token we just used (another
                    # greenthread may already have refreshed it).
                    if error.http_status == 401 and token_key:
                        if token_key in self.token_data and \
                                self.token_data[token_key][1] == args['token']:
                            self.token_data_lock.acquire()
                            try:
                                # Re-check under the lock before deleting.
                                if token_key in self.token_data and \
                                        self.token_data[token_key][1] == \
                                        args['token']:
                                    logging.debug(
                                        'Deleting token %s',
                                        self.token_data[token_key][1])
                                    del self.token_data[token_key]
                            finally:
                                self.token_data_lock.release()
                    logging.debug("Retrying an error: %r", error)
                else:
                    error.retries = tries - 1
                    raise error
        fn_results['retries'] = tries
        return fn_results