def _decode_list_object(top_level_keys, nested_keys, parsed, context):
    """URL-decode selected values of a parsed ListObjects response, in place.

    Decoding is only performed when the service echoed back
    ``EncodingType == 'url'`` AND the client itself injected that encoding
    type (signalled by the ``encoding_type_auto_set`` flag in *context*) —
    otherwise the caller asked for url encoding explicitly and expects the
    encoded values untouched.

    :param top_level_keys: names of scalar response keys to decode.
    :param nested_keys: (parent, child) pairs; every ``child`` value inside
        the list stored under ``parent`` is decoded.
    :param parsed: the parsed response dict (mutated in place).
    :param context: the request context dict.
    """
    should_decode = (
        parsed.get('EncodingType') == 'url'
        and context.get('encoding_type_auto_set')
    )
    if not should_decode:
        return
    # Scalar values that live directly on the response.
    for name in top_level_keys:
        if name in parsed:
            parsed[name] = unquote_str(parsed[name])
    # Values nested one level down, inside lists of structures.
    for parent, child in nested_keys:
        for item in parsed.get(parent, ()):
            item[child] = unquote_str(item[child])
def decode_list_object(parsed, context, **kwargs):
    """URL-decode each object's ``Key`` in a ListObjects response, in place.

    The client passes ``url`` as the encoding type on the request; because
    the paginator keys off of ``Key``, the value has to be decoded here
    before the response can be round-tripped. Only runs when the client set
    the encoding type automatically (``EncodingTypeAutoSet`` in *context*).
    """
    auto_set = context.get('EncodingTypeAutoSet')
    url_encoded = parsed.get('EncodingType') == 'url'
    if 'Contents' in parsed and url_encoded and auto_set:
        for entry in parsed['Contents']:
            entry['Key'] = unquote_str(entry['Key'])
def decode_list_object(parsed, context, **kwargs):
    """URL-decode the url-encoded parts of a ListObjects response, in place.

    The client passes url as the encoding type; since the paginator is
    based on the key, the values must be decoded before the response is
    round-tripped.

    Per the S3 documentation, when the encoding-type request parameter is
    given, encoded values are returned in these response elements:
    Delimiter, Marker, Prefix, NextMarker, Key.
    """
    if parsed.get("EncodingType") != "url":
        return
    if not context.get("encoding_type_auto_set"):
        # The caller asked for url encoding explicitly; leave it alone.
        return
    # Scalar, top-level response values.
    for scalar in ("Delimiter", "Marker", "NextMarker"):
        if scalar in parsed:
            parsed[scalar] = unquote_str(parsed[scalar])
    # Values nested inside lists of structures.
    for parent, child in (("Contents", "Key"), ("CommonPrefixes", "Prefix")):
        for entry in parsed.get(parent, ()):
            entry[child] = unquote_str(entry[child])
def list_objects(self, bucket, prefix=None):
    """Yield ``(source_path, size, last_update)`` for each object in *bucket*.

    Requests url encoding so that keys survive the wire format, then
    decodes each key before yielding it.

    :param bucket: name of the S3 bucket to list.
    :param prefix: optional key prefix to restrict the listing.
    :returns: generator of ``(path, size, last_update)`` tuples, where
        ``path`` is ``"<bucket>/<decoded key>"``.
    """
    kwargs = {"bucket": bucket, "encoding_type": "url"}
    if prefix is not None:
        kwargs["prefix"] = prefix
    pages = self._operation.paginate(self._endpoint, **kwargs)
    for response, page in pages:
        # BUG FIX: an empty bucket (or a prefix that matches nothing)
        # returns a page with no "Contents" element; the previous
        # unconditional page["Contents"] raised KeyError in that case.
        contents = page.get("Contents", [])
        for content in contents:
            source_path = bucket + "/" + unquote_str(content["Key"])
            size = content["Size"]
            last_update = self._date_parser(content["LastModified"])
            yield source_path, size, last_update
def decode_list_object(parsed, context, **kwargs):
    """Undo the url encoding of a ListObjects response, mutating *parsed*.

    The client passes url as the encoding type; since the paginator is
    based on the key, the values must be decoded before being round-tripped.

    From the S3 documentation: if the encoding-type request parameter is
    specified, encoded key name values are returned in the Delimiter,
    Marker, Prefix, NextMarker and Key response elements.
    """
    auto_set = context.get('EncodingTypeAutoSet')
    if parsed.get('EncodingType') == 'url' and auto_set:
        # Scalar values stored directly on the response.
        for scalar in ('Delimiter', 'Marker', 'NextMarker'):
            if scalar in parsed:
                parsed[scalar] = unquote_str(parsed[scalar])
        # Values nested inside lists of structures.
        pairs = [('Contents', 'Key'), ('CommonPrefixes', 'Prefix')]
        for parent, child in pairs:
            if parent not in parsed:
                continue
            for entry in parsed[parent]:
                entry[child] = unquote_str(entry[child])
def _decode_keys(self, parsed, **kwargs):
    """URL-decode every object ``Key`` in a parsed listing, in place.

    :param parsed: the parsed ListObjects response dict (mutated in place).
    """
    # BUG FIX: "Contents" is omitted from the response when the listing is
    # empty, so the previous unconditional parsed['Contents'] access raised
    # KeyError. Guarding matches the other _decode_keys variant in this file.
    if 'Contents' in parsed:
        for content in parsed['Contents']:
            content['Key'] = unquote_str(content['Key'])
def test_unquote_with_spaces(self):
    # A '+' in the quoted form must decode to a space. The helper also
    # performs the unicode/utf-8 round trip, so this works on both
    # python2 and python3.
    value = u'foo+bar'
    self.assertEqual(unquote_str(value), 'foo bar')
def test_unquote_normal(self):
    # A value with nothing quoted in it passes through unchanged.
    # The helper still does the unicode/utf-8 round trip, so this works
    # on both python2 and python3.
    value = u'foo'
    self.assertEqual(unquote_str(value), u'foo')
def test_unquote_str(self):
    # Percent-escaped utf-8 bytes decode back to the original unicode
    # character (check mark). The unicode/utf-8 round trip makes this
    # work on both python2 and python3.
    value = u'%E2%9C%93'
    self.assertEqual(unquote_str(value), u'\u2713')
def _decode_keys(self, parsed, **kwargs):
    """URL-decode the ``Key`` of every listed object, in place.

    A listing with no results carries no "Contents" element, in which
    case there is nothing to decode.
    """
    for content in parsed.get("Contents", ()):
        content["Key"] = unquote_str(content["Key"])