def GET(self, req):
    """
    Handles GET Bucket acl and GET Object acl.
    """
    # Fetch the resource; the ACL middleware attaches the parsed ACL
    # to the backend response as an attribute.
    backend_resp = req.get_response(self.app)
    if req.is_object_request:
        acl = backend_resp.object_acl
    else:
        acl = backend_resp.bucket_acl
    resp = HTTPOk()
    resp.body = tostring(acl.elem())
    return resp
def GET(self, req):
    """
    Handles GET Bucket acl and GET Object acl.
    """
    # A HEAD is enough: the ACL comes back as response metadata.
    # Reading an ACL requires the READ_ACP permission.
    head_resp = req.get_response(self.app, 'HEAD', permission='READ_ACP')
    acl_attr = '%s_acl' % ('object' if req.is_object_request else 'bucket')
    acl = getattr(head_resp, acl_attr)
    resp = HTTPOk()
    resp.body = tostring(acl.elem())
    return resp
def GET(self, req):
    """
    Handles GET Bucket acl and GET Object acl.
    """
    backend_resp = req.get_response(self.app)
    # Pick the ACL that matches the type of the requested resource.
    acl = getattr(
        backend_resp,
        'object_acl' if req.is_object_request else 'bucket_acl')
    ok_resp = HTTPOk()
    ok_resp.body = tostring(acl.elem())
    return ok_resp
def GET(self, req):
    """
    Handles GET Bucket acl and GET Object acl.
    """
    # READ_ACP is the permission required to read an ACL; the HEAD
    # response carries the ACL as metadata.
    head_resp = req.get_response(self.app, 'HEAD', permission='READ_ACP')
    if req.is_object_request:
        acl = head_resp.object_acl
    else:
        acl = head_resp.bucket_acl
    result = HTTPOk()
    result.body = tostring(acl.elem())
    return result
def POST(self, req):
    """
    Handles Initiate Multipart Upload.
    """
    log_s3api_command(req, 'create-multipart-upload')
    # A fresh UUID-based upload id keeps concurrent uploads apart.
    upload_id = unique_id()
    segments_container = req.container_name + MULTIUPLOAD_SUFFIX
    marker = '%s/%s' % (req.object_name, upload_id)

    if HTTP_HEADER_TAGGING_KEY in req.headers:
        # Translate the url-encoded tagging header into the XML form
        # expected by the tagging sysmeta.
        tagging = convert_urlquery_to_xml(
            req.headers.get(HTTP_HEADER_TAGGING_KEY))
        req.headers[OBJECT_TAGGING_HEADER] = tagging

    # The placeholder object is empty, so client-provided checksums
    # would never match; drop them before the PUT.
    for checksum_header in ('Etag', 'Content-Md5'):
        req.headers.pop(checksum_header, None)
    req.environ['oio.ephemeral_object'] = True
    req.get_response(self.app, 'PUT', segments_container, marker, body='')

    result_elem = Element('InitiateMultipartUploadResult')
    SubElement(result_elem, 'Bucket').text = req.container_name
    SubElement(result_elem, 'Key').text = req.object_name
    SubElement(result_elem, 'UploadId').text = upload_id
    return HTTPOk(body=tostring(result_elem),
                  content_type='application/xml')
def GET(self, req):  # pylint: disable=invalid-name
    """
    Handles GET Bucket tagging and GET Object tagging.
    """
    resp = req.get_versioned_response(self.app, 'HEAD',
                                      req.container_name, req.object_name)
    headers = {}
    if req.is_object_request:
        body = resp.sysmeta_headers.get(OBJECT_TAGGING_HEADER)
        # It seems that S3 returns x-amz-version-id,
        # even if it is not documented.
        headers['x-amz-version-id'] = resp.sw_headers[VERSION_ID_HEADER]
    else:
        body = resp.sysmeta_headers.get(BUCKET_TAGGING_HEADER)
    close_if_possible(resp.app_iter)

    if not body:
        if not req.is_object_request:
            # Untagged buckets get a dedicated error response...
            raise NoSuchTagSet(headers=headers)
        # ...while untagged objects get an empty TagSet document.
        elem = Element('Tagging')
        SubElement(elem, 'TagSet')
        body = tostring(elem)

    return HTTPOk(body=body, content_type='application/xml',
                  headers=headers)
def POST(self, req):
    """
    Handles Initiate Multipart Upload.
    """
    # Create a unique S3 upload id from UUID to avoid duplicates.
    upload_id = unique_id()
    segments_container = req.container_name + '+segments'
    try:
        # Make sure the segments container exists; creation races with
        # other uploads are harmless.
        req.get_response(self.app, 'PUT', segments_container, '')
    except BucketAlreadyExists:
        pass

    marker = '%s/%s' % (req.object_name, upload_id)
    req.get_response(self.app, 'PUT', segments_container, marker, body='')

    result_elem = Element('InitiateMultipartUploadResult')
    SubElement(result_elem, 'Bucket').text = req.container_name
    SubElement(result_elem, 'Key').text = req.object_name
    SubElement(result_elem, 'UploadId').text = upload_id
    return HTTPOk(body=tostring(result_elem),
                  content_type='application/xml')
def HEAD(self, req):
    """
    Handle HEAD Bucket (Get Metadata) request
    """
    # A missing bucket raises from get_response; on success just relay
    # the backend headers.
    backend_resp = req.get_response(self.app)
    return HTTPOk(headers=backend_resp.headers)
def GET(self, req):
    """
    Handles GET Bucket versioning.
    """
    log_s3api_command(req, 'get-bucket-versioning')
    info = req.get_container_info(self.app)

    status = None
    versions_container = info.get('sysmeta', {}).get('versions-location')
    if versions_container:
        status = 'Enabled'
    else:
        # No active versions location: if the dedicated versions
        # container still exists, versioning was once enabled and is
        # now suspended.
        versions_container = ''.join(
            [req.container_name, VERSIONING_SUFFIX])
        try:
            req.get_response(self.app, 'HEAD',
                             container=versions_container)
            status = 'Suspended'
        except NoSuchBucket:
            # Just report there is no versioning configured here.
            pass

    elem = Element('VersioningConfiguration')
    if status:
        SubElement(elem, 'Status').text = status
    return HTTPOk(body=tostring(elem), content_type="text/plain")
def GET(self, req):
    """
    Handle GET Service request
    """
    resp = req.get_response(self.app, query={'format': 'json'})
    containers = loads(resp.body)

    # We do not keep the creation time of a bucket (s3cmd does not
    # work without one), so report a bogus constant date.
    elem = Element('ListAllMyBucketsResult')
    owner = SubElement(elem, 'Owner')
    SubElement(owner, 'ID').text = req.user_id
    SubElement(owner, 'DisplayName').text = req.user_id
    buckets = SubElement(elem, 'Buckets')
    for container in containers:
        bucket = SubElement(buckets, 'Bucket')
        SubElement(bucket, 'Name').text = container['name']
        SubElement(bucket, 'CreationDate').text = \
            '2009-02-03T16:45:09.000Z'
    return HTTPOk(content_type='application/xml', body=tostring(elem))
def PUT(self, req):
    """
    Handle PUT Bucket versioning.

    Validates the VersioningConfiguration body, ensures the hidden
    versions container exists, then enables or suspends history-based
    versioning on the bucket via Swift's X-History-Location sysmeta.

    :raises MalformedXML: when the body is invalid or Status is not
                          'Enabled' or 'Suspended'
    """
    xml = req.xml(MAX_PUT_VERSIONING_BODY_SIZE)
    try:
        elem = fromstring(xml, 'VersioningConfiguration')
        status = elem.find('./Status').text
    except (XMLSyntaxError, DocumentInvalid):
        raise MalformedXML()
    except Exception as e:
        # Unexpected failure: log it and re-raise preserving the
        # original traceback (Python 2 three-expression raise form).
        exc_type, exc_value, exc_traceback = sys.exc_info()
        LOGGER.error(e)
        raise exc_type, exc_value, exc_traceback

    if status not in ['Enabled', 'Suspended']:
        raise MalformedXML()

    # Make sure the versions container exists
    req.container_name += VERSIONING_SUFFIX
    try:
        req.get_container_info(self.app)
    except NoSuchBucket:
        req.get_response(self.app, 'PUT', req.container_name, '')

    # Set up versioning
    if status == 'Enabled':
        req.headers['X-History-Location'] = req.container_name
    else:
        req.headers['X-Remove-History-Location'] = 'true'

    # Set the container back to what it originally was
    req.container_name = req.container_name[:-len(VERSIONING_SUFFIX)]
    req.get_response(self.app, 'POST')

    return HTTPOk()
def PUT(self, req):
    """
    Handles PUT Bucket acl and PUT Object acl.
    """
    # The ACL itself is persisted as sysmeta by the request layer;
    # a POST against the (possibly versioned) resource stores it.
    req.get_versioned_response(self.app, 'POST')
    return HTTPOk()
def GET(self, req):  # pylint: disable=invalid-name
    """
    Handles GET Bucket CORS.
    """
    # NOTE(review): this calls the private _get_response, presumably to
    # bypass checks done by the public wrapper — confirm against the
    # Request class before changing.
    head_resp = req._get_response(self.app, 'HEAD', req.container_name,
                                  None)
    cors_xml = head_resp.sysmeta_headers.get(BUCKET_CORS_HEADER)
    if not cors_xml:
        raise NoSuchCORSConfiguration
    return HTTPOk(body=cors_xml, content_type='application/xml')
def HEAD(self, req):
    """
    Handle HEAD Bucket (Get Metadata) request
    """
    log_s3api_command(req, 'head-bucket')
    # Relay the backend headers; a missing bucket raises before this.
    return HTTPOk(headers=req.get_response(self.app).headers)
def GET(self, req):
    """
    Handles GET Bucket lifecycle.
    """
    # The lifecycle document is stored verbatim in bucket sysmeta.
    head_resp = req.get_response(self.app, method='HEAD')
    lifecycle_xml = head_resp.sysmeta_headers.get(LIFECYCLE_HEADER)
    if not lifecycle_xml:
        raise NoSuchLifecycleConfiguration()
    return HTTPOk(body=lifecycle_xml, content_type='application/xml')
def GET(self, req):
    """
    Handles GET Bucket lifecycle.
    """
    container_info = req.get_container_info(self.app)
    # The lifecycle document is stored verbatim in container sysmeta.
    lifecycle_xml = container_info['sysmeta'].get('swift3-lifecycle')
    if not lifecycle_xml:
        raise NoSuchLifecycleConfiguration()
    return HTTPOk(body=lifecycle_xml, content_type='application/xml')
def GET(self, req):  # pylint: disable=invalid-name
    """
    Handles GET Bucket CORS.
    """
    log_s3api_command(req, 'get-bucket-cors')
    # The CORS document is stored verbatim in bucket sysmeta.
    head_resp = req.get_response(self.app, method='HEAD')
    cors_xml = head_resp.sysmeta_headers.get(BUCKET_CORS_HEADER)
    if not cors_xml:
        raise NoSuchCORSConfiguration
    return HTTPOk(body=cors_xml, content_type='application/xml')
def GET(self, req):
    """
    Handles GET Bucket versioning.
    """
    # Check bucket existence (raises if it is missing).
    req.get_response(self.app, method='HEAD')

    # Just report there is no versioning configured here.
    empty_conf = Element('VersioningConfiguration')
    return HTTPOk(body=tostring(empty_conf), content_type="text/plain")
def GET(self, req):
    """
    Handles GET Bucket logging.
    """
    # Bucket must exist; this raises otherwise.
    req.get_response(self.app, method='HEAD')

    # Logging is never enabled: return an empty status document.
    status_elem = Element('BucketLoggingStatus')
    return HTTPOk(body=tostring(status_elem),
                  content_type='application/xml')
def GET(self, req):
    """
    Handles GET Bucket location.
    """
    # Bucket must exist; this raises otherwise.
    req.get_response(self.app, method='HEAD')

    elem = Element('LocationConstraint')
    # S3 represents the default US region as an empty element.
    if CONF.location != 'US':
        elem.text = CONF.location
    return HTTPOk(body=tostring(elem), content_type='application/xml')
def PUT(self, req):
    """
    Handles PUT Bucket acl and PUT Object acl.

    Builds the requested ACL from the request headers/body, verifies
    the resource owner is unchanged, then persists the ACL as sysmeta
    (self-copy for objects, POST for buckets).
    """
    if req.is_object_request:
        # Bucket owner is needed to resolve canned ACLs; the object
        # HEAD enforces WRITE_ACP on the target object.
        b_resp = req.get_response(self.app, 'HEAD', obj='',
                                  skip_check=True)
        o_resp = req.get_response(self.app, 'HEAD',
                                  permission='WRITE_ACP')
        req_acl = get_acl(req.headers, req.xml(ACL.max_xml_length),
                          b_resp.bucket_acl.owner,
                          o_resp.object_acl.owner)

        # Don't change the owner of the resource by PUT acl request.
        o_resp.object_acl.check_owner(req_acl.owner.id)

        for g in req_acl.grants:
            LOGGER.debug('Grant %s %s permission on the object /%s/%s' %
                         (g.grantee, g.permission, req.container_name,
                          req.object_name))
        req.object_acl = req_acl
        headers = {}
        src_path = '/%s/%s' % (req.container_name, req.object_name)

        # object-sysmeta' can be updated by 'Copy' method,
        # but can not be by 'POST' method.
        # So headers['X-Copy-From'] for copy request is added here.
        headers['X-Copy-From'] = quote(src_path)
        headers['Content-Length'] = 0
        req.get_response(self.app, 'PUT', headers=headers,
                         skip_check=True)
    else:
        # The bucket HEAD enforces WRITE_ACP on the bucket itself.
        resp = req.get_response(self.app, 'HEAD',
                                permission='WRITE_ACP')
        req_acl = get_acl(req.headers, req.xml(ACL.max_xml_length),
                          resp.bucket_acl.owner)

        # Don't change the owner of the resource by PUT acl request.
        resp.bucket_acl.check_owner(req_acl.owner.id)

        for g in req_acl.grants:
            LOGGER.debug('Grant %s %s permission on the bucket /%s' %
                         (g.grantee, g.permission, req.container_name))
        req.bucket_acl = req_acl
        req.get_response(self.app, 'POST', skip_check=True)

    return HTTPOk()
def GET(self, req):
    """
    Handle GET Service request.

    Lists the account's buckets, filtering out container names that
    are not valid S3 bucket names.
    """
    log_s3api_command(req, 'list-buckets')
    resp = req.get_response(self.app, query={'format': 'json'})

    containers = json.loads(resp.body)
    containers = filter(
        lambda item: validate_bucket_name(item['name']), containers)

    # we don't keep the creation time of a bucket (s3cmd doesn't
    # work without that) so we use something bogus.
    elem = Element('ListAllMyBucketsResult')
    owner = SubElement(elem, 'Owner')
    SubElement(owner, 'ID').text = req.user_id
    SubElement(owner, 'DisplayName').text = req.user_id
    buckets = SubElement(elem, 'Buckets')
    for c in containers:
        if 'last_modified' in c:
            # Prefer the real modification time when the backend
            # exposes it over the bogus constant date.
            ts = last_modified_date_to_timestamp(c['last_modified'])
            creation_date = S3Timestamp(ts).s3xmlformat
        else:
            creation_date = '2009-02-03T16:45:09.000Z'
        if CONF.s3_acl and CONF.check_bucket_owner:
            try:
                cname = c['name'].encode('utf-8')
                c_resp = req.get_response(self.app, 'HEAD', cname)
                if 'X-Timestamp' in c_resp.sw_headers:
                    creation_date = S3Timestamp(
                        c_resp.sw_headers['X-Timestamp']).s3xmlformat
            except AccessDenied:
                # Skip buckets this account is not allowed to read.
                continue
            except NoSuchBucket:
                continue
        bucket = SubElement(buckets, 'Bucket')
        SubElement(bucket, 'Name').text = c['name']
        SubElement(bucket, 'CreationDate').text = creation_date

    body = tostring(elem)
    return HTTPOk(content_type='application/xml', body=body)
def OPTIONS(self, req):
    """Handle a CORS preflight request against the bucket's rules."""
    origin = req.headers.get('Origin')
    if not origin:
        raise CORSOriginMissing()

    method = req.headers.get('Access-Control-Request-Method')
    if method not in CORS_ALLOWED_HTTP_METHOD:
        raise CORSInvalidAccessControlRequest(method=method)

    rule = get_cors(self.app, req, method, origin)
    # FIXME(mbo): we should raise also NoSuchCORSConfiguration
    if rule is None:
        raise CORSForbidden(method)

    resp = HTTPOk(body=None)
    # Preflight responses carry no body, so drop the Content-Type.
    del resp.headers['Content-Type']
    return cors_fill_headers(req, resp, rule)
def PUT(self, req):
    """
    Handles PUT Bucket acl and PUT Object acl.
    """
    if not req.is_object_request:
        # Bucket sysmeta can be updated with a plain POST.
        req.get_response(self.app, 'POST')
        return HTTPOk()

    # object-sysmeta' can be updated by 'Copy' method,
    # but can not be by 'POST' method.
    # So headers['X-Copy-From'] for copy request is added here.
    src_path = '/%s/%s' % (req.container_name, req.object_name)
    headers = {'X-Copy-From': quote(src_path), 'Content-Length': 0}
    req.get_response(self.app, 'PUT', headers=headers)
    return HTTPOk()
def get_acl(account_name, headers):
    """
    Attempts to construct an S3 ACL based on what is found in the
    swift headers
    """
    elem = Element('AccessControlPolicy')
    owner = SubElement(elem, 'Owner')
    SubElement(owner, 'ID').text = account_name
    SubElement(owner, 'DisplayName').text = account_name
    access_control_list = SubElement(elem, 'AccessControlList')

    def add_grant(grantee_type, fields, permission):
        # Append one <Grant> element to the AccessControlList.
        grant = SubElement(access_control_list, 'Grant')
        grantee = SubElement(grant, 'Grantee', nsmap={'xsi': XMLNS_XSI})
        grantee.set('{%s}type' % XMLNS_XSI, grantee_type)
        for tag, text in fields:
            SubElement(grantee, tag).text = text
        SubElement(grant, 'Permission').text = permission

    # grant FULL_CONTROL to myself by default
    add_grant('CanonicalUser',
              [('ID', account_name), ('DisplayName', account_name)],
              'FULL_CONTROL')

    all_users_uri = 'http://acs.amazonaws.com/groups/global/AllUsers'

    referrers, _ = parse_acl(headers.get('x-container-read'))
    if referrer_allowed('unknown', referrers):
        # grant public-read access
        add_grant('Group', [('URI', all_users_uri)], 'READ')

    referrers, _ = parse_acl(headers.get('x-container-write'))
    if referrer_allowed('unknown', referrers):
        # grant public-write access
        add_grant('Group', [('URI', all_users_uri)], 'WRITE')

    return HTTPOk(body=tostring(elem), content_type="text/plain")
def PUT(self, req):
    """
    Handle PUT Object and PUT Object (Copy) request.
    """
    if CONF.s3_acl:
        if 'X-Amz-Copy-Source' in req.headers:
            # Verify the requester may READ the copy source before
            # touching the destination.
            src_path = req.headers['X-Amz-Copy-Source']
            src_path = src_path if src_path.startswith('/') else \
                ('/' + src_path)
            src_bucket, src_obj = split_path(src_path, 0, 2, True)
            req.get_response(self.app, 'HEAD', src_bucket, src_obj,
                             permission='READ')
        # Bucket owner is needed below to resolve the requested ACL.
        b_resp = req.get_response(self.app, 'HEAD', obj='')
        # To avoid overwriting the existing object by unauthorized user,
        # we send HEAD request first before writing the object to make
        # sure that the target object does not exist or the user that
        # sent the PUT request have write permission.
        try:
            req.get_response(self.app, 'HEAD')
        except NoSuchKey:
            pass
        req_acl = ACL.from_headers(req.headers,
                                   b_resp.bucket_acl.owner,
                                   Owner(req.user_id, req.user_id))

        req.object_acl = req_acl

    resp = req.get_response(self.app)

    if 'X-Amz-Copy-Source' in req.headers:
        # Copy requests answer with a CopyObjectResult document
        # (no S3 namespace) instead of the plain PUT response.
        elem = Element('CopyObjectResult')
        SubElement(elem, 'ETag').text = '"%s"' % resp.etag
        body = tostring(elem, use_s3ns=False)
        return HTTPOk(body=body, headers=resp.headers)

    resp.status = HTTP_OK
    return resp
def POST(self, req):
    """
    Handles Initiate Multipart Upload.
    """
    # Create a unique S3 upload id from UUID to avoid duplicates.
    upload_id = unique_id()
    segments_container = req.container_name + MULTIUPLOAD_SUFFIX

    # Remember whether the client supplied a Content-Type so that the
    # completed object can be created with the same one later.
    content_type = req.headers.get('Content-Type')
    if content_type:
        req.headers[sysmeta_header('object', 'has-content-type')] = 'yes'
        req.headers[
            sysmeta_header('object', 'content-type')] = content_type
    else:
        req.headers[sysmeta_header('object', 'has-content-type')] = 'no'
    req.headers['Content-Type'] = 'application/directory'

    try:
        # Make sure the segments container exists; ignore creation races.
        req.get_response(self.app, 'PUT', segments_container, '')
    except BucketAlreadyExists:
        pass

    marker = '%s/%s' % (req.object_name, upload_id)
    # The upload marker object is empty, so client-supplied checksums
    # would never match; drop them before the PUT.
    req.headers.pop('Etag', None)
    req.headers.pop('Content-Md5', None)
    req.get_response(self.app, 'PUT', segments_container, marker, body='')

    result_elem = Element('InitiateMultipartUploadResult')
    SubElement(result_elem, 'Bucket').text = req.container_name
    SubElement(result_elem, 'Key').text = req.object_name
    SubElement(result_elem, 'UploadId').text = upload_id
    return HTTPOk(body=tostring(result_elem),
                  content_type='application/xml')
def GET(self, req):
    """
    Handle GET Service request
    """
    resp = req.get_response(self.app, query={'format': 'json'})
    containers = json.loads(resp.body)
    # Hide containers whose names are not valid S3 bucket names.
    containers = filter(lambda item: validate_bucket_name(item['name']),
                        containers)

    # We do not keep the creation time of a bucket (s3cmd does not
    # work without one), so report a bogus constant date.
    elem = Element('ListAllMyBucketsResult')
    owner = SubElement(elem, 'Owner')
    SubElement(owner, 'ID').text = req.user_id
    SubElement(owner, 'DisplayName').text = req.user_id
    buckets = SubElement(elem, 'Buckets')
    for container in containers:
        if CONF.s3_acl and CONF.check_bucket_owner:
            try:
                # Only list buckets this account can actually read.
                req.get_response(self.app, 'HEAD', container['name'])
            except (AccessDenied, NoSuchBucket):
                continue
        bucket = SubElement(buckets, 'Bucket')
        SubElement(bucket, 'Name').text = container['name']
        SubElement(bucket, 'CreationDate').text = \
            '2009-02-03T16:45:09.000Z'
    return HTTPOk(content_type='application/xml', body=tostring(elem))
def PUT(self, req):  # pylint: disable=invalid-name
    """
    Handles PUT Bucket tagging and PUT Object tagging.
    """
    tagging_xml = req.xml(MAX_TAGGING_BODY_SIZE)
    try:
        # Just validate the body
        fromstring(tagging_xml, 'Tagging')
    except (DocumentInvalid, XMLSyntaxError) as exc:
        raise MalformedXML(str(exc))

    # The document is stored verbatim as sysmeta on the object or the
    # bucket, depending on the request target.
    if req.object_name:
        req.headers[OBJECT_TAGGING_HEADER] = tagging_xml
    else:
        req.headers[BUCKET_TAGGING_HEADER] = tagging_xml

    resp = req.get_versioned_response(self.app, 'POST',
                                      req.container_name, req.object_name)
    if resp.status_int == 202:
        headers = {}
        if req.object_name:
            headers['x-amz-version-id'] = \
                resp.sw_headers[VERSION_ID_HEADER]
        return HTTPOk(headers=headers)
    return resp
def POST(self, req):
    """
    Handles Delete Multiple Objects.

    Parses the <Delete> document, deletes each listed key, and
    accumulates per-key results into a <DeleteResult> response.
    """
    def object_key_iter(elem):
        # Yield (key, version) pairs from the <Delete> document.
        for obj in elem.iterchildren('Object'):
            key = obj.find('./Key').text
            if not key:
                raise UserKeyMustBeSpecified()
            version = obj.find('./VersionId')
            if version is not None:
                version = version.text

            yield key, version

    try:
        xml = req.xml(MAX_MULTI_DELETE_BODY_SIZE, check_md5=True)
        elem = fromstring(xml, 'Delete')

        # In quiet mode only failures are reported in the result.
        quiet = elem.find('./Quiet')
        if quiet is not None and quiet.text.lower() == 'true':
            self.quiet = True
        else:
            self.quiet = False

        delete_list = list(object_key_iter(elem))
        if len(delete_list) > CONF.max_multi_delete_objects:
            raise MalformedXML()
    except (XMLSyntaxError, DocumentInvalid):
        raise MalformedXML()
    except ErrorResponse:
        raise
    except Exception as e:
        # Unexpected failure: log and re-raise preserving the original
        # traceback (Python 2 three-expression raise form).
        exc_type, exc_value, exc_traceback = sys.exc_info()
        LOGGER.error(e)
        raise exc_type, exc_value, exc_traceback

    elem = Element('DeleteResult')

    # check bucket existence
    try:
        req.get_response(self.app, 'HEAD')
    except AccessDenied as error:
        # Report the same error for every requested key.
        body = self._gen_error_body(error, elem, delete_list)
        return HTTPOk(body=body)

    for key, version in delete_list:
        if version is not None:
            # TODO: delete the specific version of the object
            raise S3NotImplemented()

        req.object_name = key

        try:
            # Also schedule deletion of multipart segments, if any.
            query = req.gen_multipart_manifest_delete_query(self.app)
            req.get_response(self.app, method='DELETE', query=query)
        except ErrorResponse as e:
            # A failed key does not abort the batch; record the error.
            error = SubElement(elem, 'Error')
            SubElement(error, 'Key').text = key
            SubElement(error, 'Code').text = e.__class__.__name__
            SubElement(error, 'Message').text = e._msg
            continue
        if not self.quiet:
            deleted = SubElement(elem, 'Deleted')
            SubElement(deleted, 'Key').text = key

    body = tostring(elem)

    return HTTPOk(body=body)
def GET(self, req):
    """
    Handle GET Bucket (List Objects) request.

    Translates the Swift JSON container listing into an S3
    ListBucketResult document, honoring marker, prefix, delimiter,
    max-keys and encoding-type parameters.
    """
    max_keys = req.get_validated_param('max-keys',
                                       CONF.max_bucket_listing)
    # TODO: Separate max_bucket_listing and default_bucket_listing
    tag_max_keys = max_keys
    max_keys = min(max_keys, CONF.max_bucket_listing)

    encoding_type = req.params.get('encoding-type')
    if encoding_type is not None and encoding_type != 'url':
        err_msg = 'Invalid Encoding Method specified in Request'
        raise InvalidArgument('encoding-type', encoding_type, err_msg)

    # Ask the backend for one extra record to detect truncation.
    query = {
        'format': 'json',
        'limit': max_keys + 1,
    }
    if 'marker' in req.params:
        query.update({'marker': req.params['marker']})
    if 'prefix' in req.params:
        query.update({'prefix': req.params['prefix']})
    if 'delimiter' in req.params:
        query.update({'delimiter': req.params['delimiter']})

    resp = req.get_response(self.app, query=query)

    objects = json.loads(resp.body)

    elem = Element('ListBucketResult')
    SubElement(elem, 'Name').text = req.container_name
    SubElement(elem, 'Prefix').text = req.params.get('prefix')
    SubElement(elem, 'Marker').text = req.params.get('marker')

    # in order to judge that truncated is valid, check whether
    # max_keys + 1 th element exists in swift.
    is_truncated = max_keys > 0 and len(objects) > max_keys
    objects = objects[:max_keys]

    if is_truncated and 'delimiter' in req.params:
        # The last entry is either an object ('name') or a common
        # prefix ('subdir'); either one becomes the NextMarker.
        if 'name' in objects[-1]:
            SubElement(elem, 'NextMarker').text = \
                objects[-1]['name']
        if 'subdir' in objects[-1]:
            SubElement(elem, 'NextMarker').text = \
                objects[-1]['subdir']

    SubElement(elem, 'MaxKeys').text = str(tag_max_keys)

    if 'delimiter' in req.params:
        SubElement(elem, 'Delimiter').text = req.params['delimiter']

    if encoding_type is not None:
        SubElement(elem, 'EncodingType').text = encoding_type

    SubElement(elem, 'IsTruncated').text = \
        'true' if is_truncated else 'false'

    for o in objects:
        if 'subdir' not in o:
            contents = SubElement(elem, 'Contents')
            SubElement(contents, 'Key').text = o['name']
            # Trim the timestamp to millisecond precision, S3-style.
            SubElement(contents, 'LastModified').text = \
                o['last_modified'][:-3] + 'Z'
            SubElement(contents, 'ETag').text = '"%s"' % o['hash']
            SubElement(contents, 'Size').text = str(o['bytes'])
            owner = SubElement(contents, 'Owner')
            SubElement(owner, 'ID').text = req.user_id
            SubElement(owner, 'DisplayName').text = req.user_id
            SubElement(contents, 'StorageClass').text = 'STANDARD'

    for o in objects:
        if 'subdir' in o:
            common_prefixes = SubElement(elem, 'CommonPrefixes')
            SubElement(common_prefixes, 'Prefix').text = o['subdir']

    body = tostring(elem, encoding_type=encoding_type)

    return HTTPOk(body=body, content_type='application/xml')
def GET(self, req):
    """
    Handles List Parts.

    Lists the uploaded parts of a multipart upload by listing the
    segment objects stored under '<object>/<upload_id>/' in the
    '+segments' container.
    """
    encoding_type = req.params.get('encoding-type')
    if encoding_type is not None and encoding_type != 'url':
        err_msg = 'Invalid Encoding Method specified in Request'
        raise InvalidArgument('encoding-type', encoding_type, err_msg)

    upload_id = req.params['uploadId']
    # Raises if the upload id does not refer to an ongoing upload.
    _check_upload_info(req, self.app, upload_id)

    part_num_marker = 0

    # TODO: add support for max-parts and part-number-marker queries.
    query = {
        'format': 'json',
        'prefix': '%s/%s/' % (req.object_name, upload_id),
        'delimiter': '/'
    }

    container = req.container_name + '+segments'
    resp = req.get_response(self.app, container=container, obj='',
                            query=query)
    objects = loads(resp.body)

    last_part = 0

    # pylint: disable-msg=E1103
    # Segment names end in the part number; sort parts numerically.
    objects.sort(key=lambda o: int(o['name'].split('/')[-1]))

    if len(objects) > 0:
        # The highest part number becomes the NextPartNumberMarker.
        o = objects[-1]
        last_part = os.path.basename(o['name'])

    result_elem = Element('ListPartsResult')
    SubElement(result_elem, 'Bucket').text = req.container_name
    SubElement(result_elem, 'Key').text = req.object_name
    SubElement(result_elem, 'UploadId').text = upload_id

    initiator_elem = SubElement(result_elem, 'Initiator')
    SubElement(initiator_elem, 'ID').text = req.user_id
    SubElement(initiator_elem, 'DisplayName').text = req.user_id
    owner_elem = SubElement(result_elem, 'Owner')
    SubElement(owner_elem, 'ID').text = req.user_id
    SubElement(owner_elem, 'DisplayName').text = req.user_id

    SubElement(result_elem, 'StorageClass').text = 'STANDARD'
    SubElement(result_elem, 'PartNumberMarker').text = str(part_num_marker)
    SubElement(result_elem, 'NextPartNumberMarker').text = str(last_part)
    SubElement(result_elem, 'MaxParts').text = str(DEFAULT_MAX_PARTS)

    # TODO: add support for EncodingType
    SubElement(result_elem, 'IsTruncated').text = 'false'

    for i in objects:
        part_elem = SubElement(result_elem, 'Part')
        SubElement(part_elem, 'PartNumber').text = i['name'].split('/')[-1]
        # Trim the timestamp to millisecond precision, S3-style.
        SubElement(part_elem, 'LastModified').text = \
            i['last_modified'][:-3] + 'Z'
        SubElement(part_elem, 'ETag').text = i['hash']
        SubElement(part_elem, 'Size').text = str(i['bytes'])

    body = tostring(result_elem, encoding_type=encoding_type)

    return HTTPOk(body=body, content_type='application/xml')