def __init__(self):
    """Set up credentials for a second ("alternate") user.

    For an ECS target a brand-new user is created through the
    management API; for an AWS target the alternate credentials are
    read from the config.

    Raises:
        Exception: when TEST_TARGET is neither ECS nor AWSS3.
    """
    # Create one user for ECS.
    # Need to export ECSTEST_TEST_TARGET='ECS'
    # for ECS runtest cfg file.
    if self.cfg['TEST_TARGET'] == constants.TARGET_ECS:
        self.username = uuid.uuid4().hex
        # Lazy %-style args: formatting is skipped when DEBUG is off,
        # and non-str values no longer raise on concatenation.
        logger.debug('username is %s', self.username)
        self.user_admin = usermgmt.UserAdmin()
        user_info = self.user_admin.create_user(self.username)
        logger.debug('user_info is %s', user_info)
        self.secret_key = self.user_admin.create_secret_key(self.username)
        logger.debug('secret_key is %s', self.secret_key)
    # Get another user info from config for AWS.
    # Need to export ECSTEST_ALT_ACCESS_KEY
    # and ECSTEST_ALT_ACCESS_SECRET
    # for AWS runtest cfg file.
    elif self.cfg['TEST_TARGET'] == constants.TARGET_AWSS3:
        self.username = self.cfg['ALT_ACCESS_KEY']
        logger.debug('username is %s', self.username)
        self.secret_key = self.cfg['ALT_ACCESS_SECRET']
        logger.debug('secret_key is %s', self.secret_key)
    else:
        raise Exception('Can not create another user!')
def tearDown(self):
    """Delete every bucket the test created, then chain to the base.

    Each failure is logged and swallowed so that one undeletable
    bucket does not prevent cleanup of the rest or mask the test's
    own result.
    """
    for bucket in self.bucket_list:
        try:
            logger.debug("delete all keys in bucket: %s", bucket.name)
            utils.delete_keys(bucket, self.target)
            self.data_conn.delete_bucket(bucket.name)
        except Exception as err:
            # logger.warn() is a deprecated alias; use warning().
            logger.warning("Delete bucket exception: %s", str(err))
    super(TestBucketAccess, self).tearDown()
def _create_bucket(self, bucket_name=None):
    """Create and return a bucket.

    A unique name is generated when *bucket_name* is not supplied.
    Asserts that the connection returned a real Bucket object.
    """
    if bucket_name is None:
        bucket_name = bucketname.get_unique_bucket_name()
    logger.debug("Create bucket: %s", bucket_name)
    new_bucket = self.data_conn.create_bucket(bucket_name)
    eq(isinstance(new_bucket, Bucket), True)
    return new_bucket
def _reuse_bucket(self):
    """Fetch the shared "reuse" bucket, creating it on first use.

    At first, create a new bucket for reuse; in other cases, reuse
    the existing bucket.
    """
    # Need to export ECSTEST_REUSE_BUCKET_NAME
    # to get an unique bucket name for reuse.
    # Otherwise, it will use a default name in config.py
    self.__bucket_name = self.cfg['REUSE_BUCKET_NAME']
    self.bucket_name = self.__bucket_name
    try:
        self.__bucket = self.data_conn.get_bucket(self.__bucket_name)
    except Exception:
        # Bucket lookup failed (it probably does not exist yet), so
        # create it.  Narrowed from a bare ``except:`` so that
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        logger.debug("create bucket %s for reuse", self.__bucket_name)
        self.__bucket = self.data_conn.create_bucket(self.__bucket_name)
def setUp(self, create_tmpdir=False, create_bucket=False,
          allow_reuse_bucket=True):
    """Prepare connections and, optionally, a tmpdir and a bucket.

    :param create_tmpdir: create a scratch directory under /var/tmp
    :param create_bucket: create (or reuse) a bucket for this test
    :param allow_reuse_bucket: permit reuse of ECSTEST_REUSE_BUCKET_NAME
    """
    super(EcsDataPlaneTestBase, self).setUp()
    config.set_boto_config()
    cfg = self.cfg
    self.target = cfg['TEST_TARGET']
    if self.target not in constants.VALID_TARGETS:
        raise Exception('invalid test targets')
    self.data_conn = self.get_conn(host=cfg['ACCESS_SERVER'])
    self.alt_data_conn = self.get_conn(host=cfg['ALT_ACCESS_SERVER'])
    if create_tmpdir:
        self.__tmpdir = tempfile.mkdtemp(dir='/var/tmp')
        self.tmpdir = self.__tmpdir
        logger.debug("tmpdir %s was created.", self.tmpdir)
    else:
        self.__tmpdir = None
    if create_bucket:
        # 1. when the test case allows bucket-name reuse and the env
        #    variable ECSTEST_REUSE_BUCKET_NAME is set, reuse that name;
        # 2. in every other situation use a fresh unique name.
        self.allow_reuse_bucket_flag = (
            allow_reuse_bucket is True
            and self.cfg['REUSE_BUCKET_NAME'] is not None
        )
        if self.allow_reuse_bucket_flag is True:
            self._reuse_bucket()
        else:
            prefix = bucketname.get_unique_bucket_name_prefix()
            self.__bucket_name = bucketname.get_unique_bucket_name(prefix)
            self.bucket_name = self.__bucket_name
            logger.debug("bucket name: %s", self.bucket_name)
            self.__bucket = self.data_conn.create_bucket(self.__bucket_name)
        self.bucket = self.__bucket
    else:
        self.__bucket = None
def test_object_list_from_distinct_bucket(self):
    """
    operation: list
    assertion: distinct buckets have different contents
    """
    bucket1 = self._create_bucket()
    bucket2 = self._create_bucket()
    key_name = keyname.get_unique_key_name()
    new_key = bucket1.new_key(key_name)
    new_key.set_contents_from_string(key_name)
    # A key written to bucket1 must not show up when listing bucket2.
    listing = list(bucket2.list())
    eq(listing, [])
    for bucket in (bucket1, bucket2):
        logger.debug("delete all keys in bucket: %s", bucket.name)
        utils.delete_keys(bucket, self.target)
        self.data_conn.delete_bucket(bucket.name)
def generate_node_list(num=None):
    '''Generate a list of *num* nodes.

    All the nodes appear in this list as evenly as possible.

    :param num: number of nodes wanted; defaults to
                constants.DEFAULT_THREAD_NUMBER
    :return: list of node addresses of length *num*
    '''
    if num is None:
        num = constants.DEFAULT_THREAD_NUMBER
    node_list = []
    # make sure ECS runtest cfg file includes below item:
    # export ECSTEST_TEST_TARGET='ECS'
    if cfg['TEST_TARGET'] == constants.TARGET_ECS:
        # if the test target is ECS, then get the node list from
        # the ECS REST API
        vdc_list = _get_vdc_list()
        logger.debug('get VDC list: %s', vdc_list)
        # In case that VDCs contain different node numbers, this is the
        # node count of the largest VDC.
        max_len_vdc = max(len(vdc) for vdc in vdc_list)
        # Round-robin across VDCs so nodes are spread evenly.
        for i in range(max_len_vdc):
            for vdc in vdc_list:
                if i < len(vdc):
                    node_list.append(vdc[i])
                    if len(node_list) == num:
                        return node_list
        # BUG FIX: previously the function fell off the end and
        # implicitly returned None when num exceeded the total node
        # count.  Repeat the nodes to fill the list, mirroring the
        # AWS branch below.
        all_nodes = list(node_list)
        if not all_nodes:
            return node_list
        idx = 0
        while len(node_list) < num:
            node_list.append(all_nodes[idx % len(all_nodes)])
            idx += 1
        return node_list
    else:
        # if the test target is AWSS3 or FAKES3, just use them literally
        nodes = [cfg['ACCESS_SERVER'], cfg['ALT_ACCESS_SERVER']]
        while num > 0:
            items = min(num, len(nodes))
            node_list.extend(nodes[0:items])
            num -= items
        return node_list
def _get_vdc_list():
    '''Generate the VDC list in ECS via the ECS REST API.

    :return: a list of VDCs, each VDC being a list of node IP strings
    '''
    admin = usermgmt.UserAdmin()
    auth_token = admin.login()
    url = cfg['CONTROL_ENDPOINT'] + '/object/vdcs/vdc/list'
    headers = {
        'Content-Type': 'application/xml',
        'X-SDS-AUTH-TOKEN': auth_token
    }
    try:
        session = requests.Session()
        response = session.get(url, headers=headers, verify=False)
        body = response.text.replace('\n', '')
        # Filter the response body for each VDC's interVdcEndPoints.
        # findall() yields one comma-separated string per VDC, e.g.
        # [u'172.29.3.148, 172.29.3.149, 172.29.3.150, 172.29.3.151',
        #  u'172.29.3.212, 172.29.3.213, 172.29.3.214, 172.29.3.215']
        endpoint_pat = re.compile(
            r'<interVdcEndPoints>([\d., ]*)</interVdcEndPoints>')
        # Split each VDC entry into a list of individual node IPs.
        return [[ip.strip() for ip in vdc.split(',')]
                for vdc in endpoint_pat.findall(body)]
    finally:
        # Release the auth token so it is not leaked, even on error.
        if admin.client.token:
            logger.debug('release the auth token')
            admin.logout()
def tearDown(self):
    """Remove the tmpdir and bucket created by setUp(), then chain up.

    Cleanup failures are logged and ignored so that teardown never
    masks the test's own result.
    """
    if self.__tmpdir is not None:
        logger.debug("delete tmpdir: %s", self.__tmpdir)
        shutil.rmtree(self.__tmpdir)
        self.__tmpdir = None
    if self.__bucket is not None:
        logger.debug("delete all keys in bucket: %s", self.__bucket_name)
        # Sometimes the bucket just disappears underneath us.
        # Case: object_post_test.py:TestObjectPost
        #       .test_post_object_with_special_valid_name
        # Since this is the tearDown() function, just ignore (but log)
        # such failures — previously the comment promised this but the
        # exception was not actually caught.
        try:
            utils.delete_keys(self.__bucket, self.target)
            if self.allow_reuse_bucket_flag is True:
                logger.debug("reuse bucket will not be deleted")
            else:
                self.data_conn.delete_bucket(self.__bucket_name)
        except Exception as err:
            logger.warning("bucket cleanup exception: %s", str(err))
    super(EcsDataPlaneTestBase, self).tearDown()
def request(method, url, access_key, secret_key, **kwargs):
    """Sends request with different method

    This is to construct a whole headers by adding basic header
    e.g. Date/Authorization besides user defined.
    And also sort params by its name for signature (AWS signature v2).

    :param method: HTTP verb, e.g. "GET"
    :param url: full request URL
    :param access_key: access key; cfg["ACCESS_KEY"] when None
    :param secret_key: secret key; cfg["ACCESS_SECRET"] when None
    :param kwargs: passed through to requests (headers, params, ...)
    :return: the requests Response object
    """
    if access_key is None:
        access_key = cfg["ACCESS_KEY"]
    if secret_key is None:
        secret_key = cfg["ACCESS_SECRET"]
    # NOTE(review): logging credentials, even at debug level, is a
    # security concern — consider redacting the secret key.
    logger.debug("access_key: %s, secret_key: %s", access_key, secret_key)
    if "headers" in kwargs:
        headers = kwargs["headers"].copy()
    else:
        headers = {}
    if not find_matching_headers(constants.DATE, headers):
        headers[constants.DATE] = formatdate(usegmt=True)
    # StringToSign = Method \n Content-MD5 \n Content-Type \n Date \n
    #                CanonicalizedAmzHeaders CanonicalizedResource
    string_to_sign = "%s\n" % method
    if find_matching_headers(constants.CONTENT_MD5, headers):
        string_to_sign += "%s\n" % headers[constants.CONTENT_MD5]
    else:
        string_to_sign += "\n"
    if find_matching_headers(constants.CONTENT_TYPE, headers):
        string_to_sign += "%s\n" % headers[constants.CONTENT_TYPE]
    else:
        string_to_sign += "\n"
    string_to_sign += headers[constants.DATE] + "\n"
    # Sort canonicalizedAmzHeaders by name
    amz_keys = [h for h in headers if h.lower().startswith("x-amz-")]
    amz_keys.sort()
    for key in amz_keys:
        string_to_sign += "%s:%s\n" % (key, headers[key].strip())
    canonicalized_resource = ""
    # The subresources that must be included
    # when constructing the CanonicalizedResource
    # for calculating signature.
    subresources = [
        "acl", "lifecycle", "location", "logging", "notification",
        "partNumber", "policy", "requestPayment", "torrent", "uploadId",
        "uploads", "versionId", "versioning", "versions", "website",
    ]
    _, _, _, _, path, _, _ = parse_url(url)
    if not path:
        path = "/"
    canonicalized_resource += path
    if "params" in kwargs:
        params = kwargs["params"].copy()
    else:
        params = {}
    subresource_without_value = []
    subresource_to_sign = []
    # Sort subresource by name, otherwise signature will not match.
    # If the value of key in dict params is none, e.g. ?acl,
    # it will not be appended to query string by requests parsing.
    # So need to pick it up from params to query string by manual.
    params = sorted(params.items(), key=lambda d: d[0])
    for key, value in params:
        if key in subresources:
            # subresource has value
            if value is not None:
                subresource_to_sign.append("%s=%s" % (key, value))
            else:
                subresource_to_sign.append("%s" % key)
                subresource_without_value.append("%s" % key)
        else:
            if value is None:
                subresource_without_value.append("%s" % key)
    if subresource_to_sign:
        canonicalized_resource += "?"
        canonicalized_resource += "&".join(subresource_to_sign)
    if subresource_without_value:
        # BUG FIX: use "&" when the URL already carries a query string,
        # so we never emit a second "?".
        url += "&" if "?" in url else "?"
        url += "&".join(subresource_without_value)
    string_to_sign += canonicalized_resource
    logger.debug("string to sign:\n%s", string_to_sign)
    logger.debug("request url:\n%s", url)
    headers[constants.AUTHORIZATION] = "AWS %s:%s" % (
        access_key, utils.get_signature(secret_key, string_to_sign))
    logger.debug("headers is %r", headers)
    if "verify" not in kwargs:
        kwargs["verify"] = False
    # BUG FIX: respect a caller-supplied timeout instead of always
    # clobbering it with the configured default.
    kwargs.setdefault("timeout", cfg["REQUEST_TIMEOUT"])
    kwargs["headers"] = headers
    logger.debug("kwargs headers is %r", kwargs["headers"])
    kwargs["params"] = params
    logger.debug("kwargs params is %r", kwargs["params"])
    session = requests.Session()
    response = session.request(method, url, **kwargs)
    return response