Example #1
def _decode_list_object(top_level_keys, nested_keys, parsed, context):
    if parsed.get('EncodingType') == 'url' and \
                    context.get('encoding_type_auto_set'):
        # URL decode top-level keys in the response if present.
        for key in top_level_keys:
            if key in parsed:
                parsed[key] = unquote_str(parsed[key])
        # URL decode nested keys from the response if present.
        for (top_key, child_key) in nested_keys:
            if top_key in parsed:
                for member in parsed[top_key]:
                    member[child_key] = unquote_str(member[child_key])
Example #2
def decode_list_object(parsed, context, **kwargs):
    # This is needed because we are passing url as the encoding type. Since the
    # paginator is based on the key, we need to handle it before it can be
    # round tripped.
    if 'Contents' in parsed and parsed.get('EncodingType') == 'url' and \
                    context.get('EncodingTypeAutoSet'):
        for content in parsed['Contents']:
            content['Key'] = unquote_str(content['Key'])
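The comment above points at the pagination concern: the marker for the next page of results is derived from the last returned Key (or from NextMarker), so a Key left percent-encoded would be fed back to S3 as a marker for a key that does not exist. A small illustrative sketch of that feedback loop (the page dict is made up):

# Illustrative only: how a paginator derives the marker for the next request.
page = {'IsTruncated': True,
        'Contents': [{'Key': 'reports%2F2015%2Fsummary.csv'}]}
next_marker = page['Contents'][-1]['Key']
# Without decoding, next_marker is 'reports%2F2015%2Fsummary.csv' -- not a real key name.
# After decode_list_object has run, the same expression yields 'reports/2015/summary.csv'.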
Example #3
def decode_list_object(parsed, context, **kwargs):
    # This is needed because we are passing url as the encoding type. Since the
    # paginator is based on the key, we need to handle it before it can be
    # round tripped.
    #
    # From the documentation: If you specify encoding-type request parameter,
    # Amazon S3 includes this element in the response, and returns encoded key
    # name values in the following response elements:
    # Delimiter, Marker, Prefix, NextMarker, Key.
    if parsed.get("EncodingType") == "url" and context.get("encoding_type_auto_set"):
        # URL decode top-level keys in the response if present.
        top_level_keys = ["Delimiter", "Marker", "NextMarker"]
        for key in top_level_keys:
            if key in parsed:
                parsed[key] = unquote_str(parsed[key])
        # URL decode nested keys from the response if present.
        nested_keys = [("Contents", "Key"), ("CommonPrefixes", "Prefix")]
        for (top_key, child_key) in nested_keys:
            if top_key in parsed:
                for member in parsed[top_key]:
                    member[child_key] = unquote_str(member[child_key])
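The effect of decode_list_object is easiest to see on a small, self-contained input. In the sketch below, urllib.parse.unquote_plus stands in for unquote_str (an assumption that matches the tests shown further down), the parsed dict is hand-built, and the encoding_type_auto_set flag is set by hand; in real use it would presumably be set by whatever code injected EncodingType=url into the request.

from urllib.parse import unquote_plus as unquote_str  # stand-in for the real helper

def decode_list_object(parsed, context, **kwargs):
    # Same logic as above, repeated so this sketch runs on its own.
    if parsed.get('EncodingType') == 'url' and context.get('encoding_type_auto_set'):
        for key in ['Delimiter', 'Marker', 'NextMarker']:
            if key in parsed:
                parsed[key] = unquote_str(parsed[key])
        for top_key, child_key in [('Contents', 'Key'), ('CommonPrefixes', 'Prefix')]:
            if top_key in parsed:
                for member in parsed[top_key]:
                    member[child_key] = unquote_str(member[child_key])

parsed = {
    'EncodingType': 'url',
    'Delimiter': '%2F',
    'Contents': [{'Key': 'photos%2Fsummer+2015%2F%E2%9C%93.jpg'}],
    'CommonPrefixes': [{'Prefix': 'photos%2F'}],
}
decode_list_object(parsed, context={'encoding_type_auto_set': True})
print(parsed['Delimiter'])                    # /
print(parsed['Contents'][0]['Key'])           # photos/summer 2015/✓.jpg
print(parsed['CommonPrefixes'][0]['Prefix'])  # photos/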
Example #4
def list_objects(self, bucket, prefix=None):
    kwargs = {"bucket": bucket, "encoding_type": "url"}
    if prefix is not None:
        kwargs["prefix"] = prefix
    pages = self._operation.paginate(self._endpoint, **kwargs)
    for response, page in pages:
        contents = page["Contents"]
        for content in contents:
            source_path = bucket + "/" + unquote_str(content["Key"])
            size = content["Size"]
            last_update = self._date_parser(content["LastModified"])
            yield source_path, size, last_update
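This method requests encoding_type="url" so that key names containing characters that are awkward for the XML response survive the transport, then unquotes each Key before yielding it, so callers only ever see decoded paths. A hypothetical consumption sketch (lister stands for an instance of the surrounding class; the bucket and prefix are illustrative):

# Hypothetical usage; 'lister' is an instance of the class the method above belongs to.
for source_path, size, last_update in lister.list_objects('my-bucket', prefix='photos/'):
    print(source_path, size, last_update)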
Example #5
def decode_list_object(parsed, context, **kwargs):
    # This is needed because we are passing url as the encoding type. Since the
    # paginator is based on the key, we need to handle it before it can be
    # round tripped.
    #
    # From the documentation: If you specify encoding-type request parameter,
    # Amazon S3 includes this element in the response, and returns encoded key
    # name values in the following response elements:
    # Delimiter, Marker, Prefix, NextMarker, Key.
    if parsed.get('EncodingType') == 'url' and \
                    context.get('EncodingTypeAutoSet'):
        # URL decode top-level keys in the response if present.
        top_level_keys = ['Delimiter', 'Marker', 'NextMarker']
        for key in top_level_keys:
            if key in parsed:
                parsed[key] = unquote_str(parsed[key])
        # URL decode nested keys from the response if present.
        nested_keys = [('Contents', 'Key'), ('CommonPrefixes', 'Prefix')]
        for (top_key, child_key) in nested_keys:
            if top_key in parsed:
                for member in parsed[top_key]:
                    member[child_key] = unquote_str(member[child_key])
Example #6
def _decode_keys(self, parsed, **kwargs):
    for content in parsed['Contents']:
        content['Key'] = unquote_str(content['Key'])
Example #7
def test_unquote_with_spaces(self):
    value = u'foo+bar'
    # Note: the '+' is decoded to a space and the result is a unicode string.
    # This works in both Python 2 and Python 3.
    self.assertEqual(unquote_str(value), 'foo bar')
Example #8
def test_unquote_normal(self):
    value = u'foo'
    # Note: a value with nothing to decode is returned unchanged as a unicode string.
    # This works in both Python 2 and Python 3.
    self.assertEqual(unquote_str(value), u'foo')
Example #9
def test_unquote_str(self):
    value = u'%E2%9C%93'
    # Note: the percent-escapes are decoded and the UTF-8 bytes become a unicode string.
    # This works in both Python 2 and Python 3.
    self.assertEqual(unquote_str(value), u'\u2713')
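Taken together, the three tests pin down what unquote_str must do: plus signs become spaces, percent-escapes are decoded as UTF-8, and the result is always a unicode string. On Python 3, urllib.parse.unquote_plus already behaves this way, so a minimal sketch consistent with these tests (not necessarily the library's actual implementation) looks like this:

from urllib.parse import unquote_plus

def unquote_str(value, encoding='utf-8'):
    # '+' becomes a space and %XX escapes are decoded using the given
    # encoding, which is exactly what the tests above expect.
    return unquote_plus(value, encoding=encoding)

assert unquote_str(u'foo+bar') == u'foo bar'
assert unquote_str(u'foo') == u'foo'
assert unquote_str(u'%E2%9C%93') == u'\u2713'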
Example #10
def _decode_keys(self, parsed, **kwargs):
    if "Contents" in parsed:
        for content in parsed["Contents"]:
            content["Key"] = unquote_str(content["Key"])