def get_acl(headers, body, bucket_owner, object_owner=None):
    """
    Get ACL instance from S3 (e.g. x-amz-grant) headers or S3 acl xml body.

    :param headers: HTTP headers that may carry x-amz-acl / x-amz-grant-*
    :param body: request body that may carry an AccessControlPolicy XML
    :param bucket_owner: owner of the bucket the ACL applies to
    :param object_owner: owner of the object, for object ACLs (optional)
    :returns: an ACL instance
    :raises MissingSecurityHeader: neither headers nor body specify an ACL
    :raises MalformedACLError: the XML body cannot be parsed/validated
    :raises UnexpectedContent: grants given in both headers and body
    """
    acl = ACL.from_headers(headers, bucket_owner, object_owner,
                           as_private=False)

    if acl is None:
        # Get acl from request body if possible.
        if not body:
            msg = 'Your request was missing a required header'
            raise MissingSecurityHeader(msg, missing_header_name='x-amz-acl')

        try:
            elem = fromstring(body, ACL.root_tag)
            acl = ACL.from_elem(elem)
        except (XMLSyntaxError, DocumentInvalid):
            raise MalformedACLError()
        except Exception as e:
            LOGGER.error(e)
            # A bare 'raise' re-raises with the original traceback; it is
            # equivalent to the Python-2-only three-expression raise form
            # and keeps the code Python-3 compatible.
            raise
    else:
        if body:
            # Specifying grant with both header and xml is not allowed.
            raise UnexpectedContent()

    return acl
def PUT(self, req):
    """
    Handle PUT Bucket request

    Validates the optional CreateBucketConfiguration body, creates the
    container, then also creates the companion "+segments" container used
    by the multipart-upload machinery.
    """
    log_s3api_command(req, 'create-bucket')
    xml = req.xml(MAX_PUT_BUCKET_BODY_SIZE)
    if xml:
        # check location
        try:
            elem = fromstring(xml, 'CreateBucketConfiguration')
            location = elem.find('./LocationConstraint').text
        except (XMLSyntaxError, DocumentInvalid):
            raise MalformedXML()
        except Exception as e:
            # Unexpected parsing failure: log, then propagate unchanged.
            LOGGER.error(e)
            raise

        if location != CONF.location:
            # Swift3 cannot support multiple regions currently.
            raise InvalidLocationConstraint()

    resp = req.get_response(self.app)

    # create bucket+segments to avoid breaking
    # functional tests for swift3
    try:
        cnt = req.container_name + MULTIUPLOAD_SUFFIX
        req.get_response(self.app, 'PUT', cnt, '')
    except BucketAlreadyExists:
        # The segments container already exists; that is fine.
        pass

    # S3 returns 200 with a Location header for bucket creation.
    resp.status = HTTP_OK
    resp.location = '/' + req.container_name

    return resp
def controller(self):
    """
    Return the controller class that handles this request, chosen from
    the request target (service/bucket/object) and the query parameters.

    :raises S3NotImplemented: a multipart query parameter is present but
                              the SLO middleware is not in the pipeline
    """
    if self.is_service_request:
        return ServiceController

    if not self.slo_enabled:
        multi_part = ["partNumber", "uploadId", "uploads"]
        # any() short-circuits; no need to build a list just to test
        # whether at least one multipart parameter is present.
        if any(p in self.params for p in multi_part):
            LOGGER.warning("multipart: No SLO middleware in pipeline")
            raise S3NotImplemented("Multi-part feature isn't support")

    if "acl" in self.params:
        return AclController
    if "delete" in self.params:
        return MultiObjectDeleteController
    if "location" in self.params:
        return LocationController
    if "logging" in self.params:
        return LoggingStatusController
    if "partNumber" in self.params:
        return PartController
    if "uploadId" in self.params:
        return UploadController
    if "uploads" in self.params:
        return UploadsController
    if "versioning" in self.params:
        return VersioningController

    unsupported = ("notification", "policy", "requestPayment", "torrent",
                   "website", "cors", "tagging", "restore")
    if set(unsupported) & set(self.params):
        return UnsupportedController

    if self.is_object_request:
        return ObjectController
    return BucketController
def controller(self):
    """
    Return the controller class that handles this request, chosen from
    the request target (service/bucket/object) and the query parameters.

    :raises S3NotImplemented: a multipart query parameter is present but
                              the SLO middleware is not in the pipeline
    """
    if self.is_service_request:
        return ServiceController

    if not self.slo_enabled:
        multi_part = ['partNumber', 'uploadId', 'uploads']
        # any() short-circuits; building a list only to take its length
        # is both slower and less readable.
        if any(p in self.params for p in multi_part):
            LOGGER.warning('multipart: No SLO middleware in pipeline')
            raise S3NotImplemented("Multi-part feature isn't support")

    if 'acl' in self.params:
        return AclController
    if 'delete' in self.params:
        return MultiObjectDeleteController
    if 'location' in self.params:
        return LocationController
    if 'logging' in self.params:
        return LoggingStatusController
    if 'partNumber' in self.params:
        return PartController
    if 'uploadId' in self.params:
        return UploadController
    if 'uploads' in self.params:
        return UploadsController
    if 'versioning' in self.params:
        return VersioningController

    unsupported = ('notification', 'policy', 'requestPayment', 'torrent',
                   'website', 'cors', 'tagging', 'restore')
    if set(unsupported) & set(self.params):
        return UnsupportedController

    if self.is_object_request:
        return ObjectController
    return BucketController
def PUT(self, req):
    """
    Handle PUT Bucket request

    Validates the optional CreateBucketConfiguration body, translates a
    canned x-amz-acl header if one is present, then creates the container.
    """
    xml = req.xml(MAX_PUT_BUCKET_BODY_SIZE)
    if xml:
        # check location
        try:
            elem = fromstring(xml, 'CreateBucketConfiguration')
            location = elem.find('./LocationConstraint').text
        except (XMLSyntaxError, DocumentInvalid):
            raise MalformedXML()
        except Exception as e:
            # Unexpected parsing failure: log, then propagate unchanged.
            LOGGER.error(e)
            raise

        if location != CONF.location:
            # Swift3 cannot support multiple regions now.
            raise InvalidLocationConstraint()

    if 'HTTP_X_AMZ_ACL' in req.environ:
        # Translate the canned S3 ACL header into Swift ACL headers.
        handle_acl_header(req)

    resp = req.get_response(self.app)

    # S3 returns 200 with a Location header for bucket creation.
    resp.status = HTTP_OK
    resp.location = '/' + req.container_name

    return resp
def PUT(self, req):
    """
    Handle PUT Bucket request

    Validates the optional CreateBucketConfiguration body, creates the
    container, and rewrites the response to S3's 200-with-Location form.
    """
    xml = req.xml(MAX_PUT_BUCKET_BODY_SIZE)
    if xml:
        # check location
        try:
            elem = fromstring(xml, 'CreateBucketConfiguration')
            location = elem.find('./LocationConstraint').text
        except (XMLSyntaxError, DocumentInvalid):
            raise MalformedXML()
        except Exception as e:
            LOGGER.error(e)
            # Bare 'raise' re-raises with the original traceback; it is
            # equivalent to the Python-2-only three-expression raise form
            # and keeps the code Python-3 compatible.
            raise

        if location != CONF.location:
            # Swift3 cannot support multiple regions currently.
            raise InvalidLocationConstraint()

    resp = req.get_response(self.app)

    resp.status = HTTP_OK
    resp.location = '/' + req.container_name

    return resp
def PUT(self, req):
    """
    Handles PUT Bucket versioning.

    Parses the VersioningConfiguration body, ensures a companion versions
    container exists, and sets/removes the history-location header on the
    bucket via a POST.
    """
    log_s3api_command(req, 'put-bucket-versioning')
    xml = req.xml(MAX_PUT_VERSIONING_BODY_SIZE)
    try:
        elem = fromstring(xml, 'VersioningConfiguration')
        status = elem.find('./Status').text
    except (XMLSyntaxError, DocumentInvalid):
        raise MalformedXML()
    except Exception as e:
        # Unexpected parsing failure: log, then propagate unchanged.
        LOGGER.error(e)
        raise

    if status not in ['Enabled', 'Suspended']:
        # S3 only defines these two versioning states.
        raise MalformedXML()

    # Make sure the versions container exists
    req.container_name += VERSIONING_SUFFIX
    try:
        req.get_container_info(self.app)
    except NoSuchBucket:
        req.get_response(self.app, 'PUT', req.container_name, '')

    # Set up versioning
    if status == 'Enabled':
        req.headers['X-History-Location'] = req.container_name
    else:
        req.headers['X-Remove-History-Location'] = 'true'
    # Set the container back to what it originally was
    req.container_name = req.container_name[:-len(VERSIONING_SUFFIX)]
    resp = req.get_response(self.app, 'POST')

    return convert_response(req, resp, 204, HTTPOk)
def get_acl(headers, body, bucket_owner, object_owner=None):
    """
    Get ACL instance from S3 (e.g. x-amz-grant) headers or S3 acl xml body.

    :param headers: HTTP headers that may carry x-amz-acl / x-amz-grant-*
    :param body: request body that may carry an AccessControlPolicy XML
    :param bucket_owner: owner of the bucket the ACL applies to
    :param object_owner: owner of the object, for object ACLs (optional)
    :returns: an ACL instance
    :raises MissingSecurityHeader: neither headers nor body specify an ACL
    :raises MalformedACLError: the XML body cannot be parsed/validated
    :raises UnexpectedContent: grants given in both headers and body
    """
    acl = ACL.from_headers(headers, bucket_owner, object_owner,
                           as_private=False)

    if acl is None:
        # Get acl from request body if possible.
        if not body:
            msg = 'Your request was missing a required header'
            raise MissingSecurityHeader(msg, missing_header_name='x-amz-acl')

        try:
            elem = fromstring(body, ACL.root_tag)
            acl = ACL.from_elem(elem)
        except (XMLSyntaxError, DocumentInvalid):
            raise MalformedACLError()
        except Exception as e:
            LOGGER.error(e)
            # Bare 'raise' re-raises with the original traceback; it is
            # equivalent to the Python-2-only three-expression raise form
            # and keeps the code Python-3 compatible.
            raise
    else:
        if body:
            # Specifying grant with both header and xml is not allowed.
            raise UnexpectedContent()

    return acl
def wrapped(self, req):
    """Force bucket-level handling: reject or drop any object key."""
    if req.is_bucket_request:
        return func(self, req)
    # An object key was given to a bucket-only API.
    if err_resp:
        raise err_resp(msg=err_msg)
    LOGGER.debug('A key is specified for bucket API.')
    req.object_name = None
    return func(self, req)
def check_filter_order(pipeline, required_filters):
    """
    Check that required filters are present in order in the pipeline.
    """
    positions = []
    for name in required_filters:
        try:
            positions.append(pipeline.index(name))
        except ValueError as err:
            # A required filter is missing from the pipeline.
            LOGGER.debug(err)
            return False
    # In-order iff every position is <= the next one.
    return all(a <= b for a, b in zip(positions, positions[1:]))
def handle_request(self, req):
    """Dispatch the request to the matching controller method."""
    LOGGER.debug('Calling Swift3 Middleware')
    LOGGER.debug(req.__dict__)

    controller = req.controller(self.app)
    method_handler = getattr(controller, req.method, None)
    if method_handler is None:
        # No handler for this HTTP verb on this resource.
        raise MethodNotAllowed(req.method,
                               req.controller.resource_type())
    return method_handler(req)
def handle_request(self, req):
    """Dispatch to the controller method, rejecting non-public handlers."""
    LOGGER.debug('Calling Swift3 Middleware')
    LOGGER.debug(req.__dict__)

    controller = req.controller(self.app)
    handler = getattr(controller, req.method, None)
    # Reject both missing handlers and handlers that exist but are not
    # flagged publicly accessible.
    if handler is None or not getattr(handler, 'publicly_accessible',
                                      False):
        raise MethodNotAllowed(req.method,
                               req.controller.resource_type())
    return handler(req)
def decode_acl(resource, headers):
    """
    Decode Swift metadata to an ACL instance.

    Given a resource type and HTTP headers, this method returns an ACL
    instance.

    :param resource: resource type ('container' or 'object' — presumably;
                     TODO confirm against sysmeta_header callers)
    :param headers: Swift response headers carrying the sysmeta ACL value
    :returns: an ACL instance (private ACL with a null owner when no value
              is stored or the stored value is not a JSON dict)
    :raises InvalidSubresource: the stored value cannot be decoded
    """
    value = ''

    key = sysmeta_header(resource, 'acl')
    if key in headers:
        value = headers[key]

    if value == '':
        # Fix me: In the case of value is empty or not dict instance,
        # I want an instance of Owner as None.
        # However, in the above process would occur error in reference
        # to an instance variable of Owner.
        return ACL(Owner(None, None), [])
    try:
        encode_value = json.loads(value)
        if not isinstance(encode_value, dict):
            return ACL(Owner(None, None), [])

        # NOTE(review): 'id' shadows the builtin; left as-is to keep this
        # a documentation-only change.
        id = None
        name = None
        grants = []
        if 'Owner' in encode_value:
            # The stored format keeps a single owner string, used as both
            # id and display name.
            id = encode_value['Owner']
            name = encode_value['Owner']
        if 'Grant' in encode_value:
            for grant in encode_value['Grant']:
                grantee = None
                # Match the stored grantee name against the known Group
                # subclasses; fall back to a plain User grantee.
                # pylint: disable-msg=E1101
                for group in Group.__subclasses__():
                    if group.__name__ == grant['Grantee']:
                        grantee = group()
                if not grantee:
                    grantee = User(grant['Grantee'])
                permission = grant['Permission']
                grants.append(Grant(grantee, permission))
        return ACL(Owner(id, name), grants)
    except Exception as e:
        # Any decode failure falls through to InvalidSubresource below.
        LOGGER.debug(e)
        pass

    raise InvalidSubresource((resource, 'acl', value))
def PUT(self, req):
    """
    Handles PUT Bucket acl and PUT Object acl.

    Reads the requested ACL from headers or body, verifies the owner is
    unchanged, then persists it on the object (via a zero-byte server-side
    copy) or on the bucket (via POST).
    """
    if req.is_object_request:
        # Need both the bucket owner and the object owner to resolve the
        # requested grants.
        b_resp = req.get_response(self.app, 'HEAD', obj='',
                                  skip_check=True)
        o_resp = req.get_response(self.app, 'HEAD',
                                  permission='WRITE_ACP')
        req_acl = get_acl(req.headers,
                          req.xml(ACL.max_xml_length),
                          b_resp.bucket_acl.owner,
                          o_resp.object_acl.owner)

        # Don't change the owner of the resource by PUT acl request.
        o_resp.object_acl.check_owner(req_acl.owner.id)

        for g in req_acl.grants:
            LOGGER.debug('Grant %s %s permission on the object /%s/%s' %
                         (g.grantee, g.permission,
                          req.container_name, req.object_name))
        req.object_acl = req_acl

        headers = {}
        src_path = '/%s/%s' % (req.container_name, req.object_name)

        # object-sysmeta' can be updated by 'Copy' method,
        # but can not be by 'POST' method.
        # So headers['X-Copy-From'] for copy request is added here.
        headers['X-Copy-From'] = quote(src_path)
        headers['Content-Length'] = 0
        req.get_response(self.app, 'PUT', headers=headers,
                         skip_check=True)
    else:
        resp = req.get_response(self.app, 'HEAD',
                                permission='WRITE_ACP')
        req_acl = get_acl(req.headers,
                          req.xml(ACL.max_xml_length),
                          resp.bucket_acl.owner)

        # Don't change the owner of the resource by PUT acl request.
        resp.bucket_acl.check_owner(req_acl.owner.id)

        for g in req_acl.grants:
            LOGGER.debug('Grant %s %s permission on the bucket /%s' %
                         (g.grantee, g.permission, req.container_name))
        req.bucket_acl = req_acl
        req.get_response(self.app, 'POST', skip_check=True)

    return HTTPOk()
def POST(self, app):
    """Apply the ACL carried by a bucket POST request."""
    if not self.req.is_bucket_request:
        self._handle_acl(app, self.method)
        return
    resp = self._handle_acl(app, 'HEAD', permission='WRITE_ACP')
    body = self.req.xml(ACL.max_xml_length)
    req_acl = get_acl(self.req.headers, body, resp.bucket_acl.owner)

    # A PUT acl request must not change the resource owner.
    resp.bucket_acl.check_owner(req_acl.owner.id)

    for grant in req_acl.grants:
        LOGGER.debug(
            'Grant %s %s permission on the bucket /%s' %
            (grant.grantee, grant.permission, self.req.container_name))
    self.req.bucket_acl = req_acl
def POST(self, app):
    """Apply the ACL carried by a bucket POST request."""
    if self.req.is_bucket_request:
        head_resp = self._handle_acl(app, 'HEAD', permission='WRITE_ACP')
        acl_xml = self.req.xml(ACL.max_xml_length)
        new_acl = get_acl(self.req.headers, acl_xml,
                          head_resp.bucket_acl.owner)

        # The bucket owner must not change via a PUT acl request.
        head_resp.bucket_acl.check_owner(new_acl.owner.id)

        for grant in new_acl.grants:
            LOGGER.debug('Grant %s %s permission on the bucket /%s' %
                         (grant.grantee, grant.permission,
                          self.req.container_name))
        self.req.bucket_acl = new_acl
    else:
        self._handle_acl(app, self.method)
def PUT(self, req):
    """
    Handle PUT Bucket request

    Validates the optional CreateBucketConfiguration body, creates the
    container, and — when s3_acl is enabled — stores the requested ACL on
    it via a follow-up POST.
    """
    xml = req.xml(MAX_PUT_BUCKET_BODY_SIZE)
    if xml:
        # check location
        try:
            elem = fromstring(xml, 'CreateBucketConfiguration')
            location = elem.find('./LocationConstraint').text
        except (XMLSyntaxError, DocumentInvalid):
            raise MalformedXML()
        except Exception as e:
            # Unexpected parsing failure: log, then propagate unchanged.
            LOGGER.error(e)
            raise

        if location != CONF.location:
            # Swift3 cannot support multiple regions now.
            raise InvalidLocationConstraint()

    if CONF.s3_acl:
        # The creating user becomes the bucket owner (id == name).
        req_acl = ACL.from_headers(req.headers,
                                   Owner(req.user_id, req.user_id))

        # To avoid overwriting the existing bucket's ACL, we send PUT
        # request first before setting the ACL to make sure that the target
        # container does not exist.
        resp = req.get_response(self.app)

        # update metadata
        req.bucket_acl = req_acl

        # FIXME If this request is failed, there is a possibility that the
        # bucket which has no ACL is left.
        req.get_response(self.app, 'POST')
    else:
        if 'HTTP_X_AMZ_ACL' in req.environ:
            # Translate the canned S3 ACL header into Swift ACL headers.
            handle_acl_header(req)

        resp = req.get_response(self.app)

    # S3 returns 200 with a Location header for bucket creation.
    resp.status = HTTP_OK
    resp.location = '/' + req.container_name

    return resp
def check_pipeline(self, conf):
    """
    Check that proxy-server.conf has an appropriate pipeline
    for swift3.

    Verifies swift3 sits before proxy-server, disables SLO-backed
    multipart support when the SLO filter is absent, and (optionally)
    checks that a recognized auth middleware is configured between them.

    :raises ValueError: the pipeline is invalid for swift3
    """
    if conf.get('__file__', None) is None:
        # Not loaded from a paste config file; nothing to check.
        return

    ctx = loadcontext(loadwsgi.APP, conf.__file__)
    pipeline = str(PipelineWrapper(ctx)).split(' ')

    # Add compatible with 3rd party middleware.
    if check_filter_order(pipeline, ['swift3', 'proxy-server']):
        # Filters placed between swift3 and proxy-server.
        auth_pipeline = pipeline[pipeline.index('swift3') + 1:
                                 pipeline.index('proxy-server')]

        # Check SLO middleware
        if 'slo' not in auth_pipeline:
            self.slo_enabled = False
            LOGGER.warning('swift3 middleware is required SLO middleware '
                           'to support multi-part upload, please add it '
                           'in pipline')

        if not conf.auth_pipeline_check:
            LOGGER.debug('Skip pipeline auth check.')
            return

        if 'tempauth' in auth_pipeline:
            LOGGER.debug('Use tempauth middleware.')
            return
        elif 'keystoneauth' in auth_pipeline:
            if check_filter_order(auth_pipeline,
                                  ['s3token', 'authtoken',
                                   'keystoneauth']):
                LOGGER.debug('Use keystone middleware.')
                return
        elif len(auth_pipeline):
            LOGGER.debug('Use third party(unknown) auth middleware.')
            return

    # Fell through every accepted configuration.
    raise ValueError('Invalid proxy pipeline: %s' % pipeline)
def check_pipeline(self, conf):
    """
    Check that proxy-server.conf has an appropriate pipeline
    for swift3.

    Ensures swift3 precedes proxy-server, turns off multipart support
    when SLO is missing, and optionally validates the auth middleware
    placed between swift3 and proxy-server.

    :raises ValueError: the pipeline is invalid for swift3
    """
    if conf.get('__file__', None) is None:
        # Not loaded from a paste config file; nothing to check.
        return

    ctx = loadcontext(loadwsgi.APP, conf.__file__)
    pipeline = str(PipelineWrapper(ctx)).split(' ')

    # Add compatible with 3rd party middleware.
    if check_filter_order(pipeline, ['swift3', 'proxy-server']):
        # Filters placed between swift3 and proxy-server.
        auth_pipeline = pipeline[pipeline.index('swift3') + 1:
                                 pipeline.index('proxy-server')]

        # Check SLO middleware
        if 'slo' not in auth_pipeline:
            self.slo_enabled = False
            LOGGER.warning('swift3 middleware is required SLO middleware '
                           'to support multi-part upload, please add it '
                           'in pipline')

        if not conf.auth_pipeline_check:
            LOGGER.debug('Skip pipeline auth check.')
            return

        if 'tempauth' in auth_pipeline:
            LOGGER.debug('Use tempauth middleware.')
            return
        elif 'keystoneauth' in auth_pipeline:
            if check_filter_order(
                    auth_pipeline,
                    ['s3token', 'authtoken', 'keystoneauth']):
                LOGGER.debug('Use keystone middleware.')
                return
        elif len(auth_pipeline):
            LOGGER.debug('Use third party(unknown) auth middleware.')
            return

    # Fell through every accepted configuration.
    raise ValueError('Invalid proxy pipeline: %s' % pipeline)
def check_filter_order(pipeline, required_filters):
    """
    Check that required filters are present in order in the pipeline.

    :param pipeline: list of filter names from the proxy pipeline
    :param required_filters: filter names that must appear, in order
    :raises ValueError: a required filter is missing or out of order
    """
    indexes = []
    missing_filters = []
    # Use a distinct loop variable: the original 'filter' shadowed the
    # builtin of the same name.
    for required_filter in required_filters:
        try:
            indexes.append(pipeline.index(required_filter))
        except ValueError as e:
            LOGGER.debug(e)
            missing_filters.append(required_filter)

    if missing_filters:
        raise ValueError('Invalid pipeline %r: missing filters %r' % (
            pipeline, missing_filters))

    if indexes != sorted(indexes):
        raise ValueError('Invalid pipeline %r: expected filter %s' % (
            pipeline, ' before '.join(required_filters)))
def check_filter_order(pipeline, required_filters):
    """
    Check that required filters are present in order in the pipeline.

    :param pipeline: list of filter names from the proxy pipeline
    :param required_filters: filter names that must appear, in order
    :raises ValueError: a required filter is missing or out of order
    """
    indexes = []
    missing_filters = []
    # Use a distinct loop variable: the original 'filter' shadowed the
    # builtin of the same name.
    for required_filter in required_filters:
        try:
            indexes.append(pipeline.index(required_filter))
        except ValueError as e:
            LOGGER.debug(e)
            missing_filters.append(required_filter)

    if missing_filters:
        raise ValueError('Invalid pipeline %r: missing filters %r'
                         % (pipeline, missing_filters))

    if indexes != sorted(indexes):
        raise ValueError('Invalid pipeline %r: expected filter %s'
                         % (pipeline, ' before '.join(required_filters)))
def __call__(self, env, start_response):
    """
    WSGI entry point: build the S3 request, dispatch it, and translate
    failures into S3-style error responses.
    """
    try:
        req_class = get_request_class(env)
        req = req_class(env, self.app, self.slo_enabled)
        resp = self.handle_request(req)
    except NotS3Request:
        # Not an S3 API call: pass through to the wrapped app untouched.
        resp = self.app
    except ErrorResponse as err_resp:
        if isinstance(err_resp, InternalError):
            # Internal errors carry a traceback worth logging.
            LOGGER.exception(err_resp)
        resp = err_resp
    except Exception as e:
        # Last-resort guard: report anything unexpected as InternalError.
        LOGGER.exception(e)
        resp = InternalError(reason=e)

    if isinstance(resp, ResponseBase) and 'swift.trans_id' in env:
        # Surface the Swift transaction id via the AWS request-id headers.
        resp.headers['x-amz-id-2'] = env['swift.trans_id']
        resp.headers['x-amz-request-id'] = env['swift.trans_id']

    return resp(env, start_response)
def PUT(self, app):
    """
    Handle the ACL side of a PUT request: for an object, resolve and
    record the requested object ACL; otherwise fall through to the
    generic ACL handling.
    """
    if self.req.is_object_request:
        # Need both the bucket owner and the object owner to resolve the
        # requested grants.
        b_resp = self.req.get_acl_response(app, 'HEAD', obj='')
        o_resp = self._handle_acl(app, 'HEAD', permission='WRITE_ACP')
        req_acl = get_acl(self.req.headers,
                          self.req.xml(ACL.max_xml_length),
                          b_resp.bucket_acl.owner,
                          o_resp.object_acl.owner)

        # Remove Content-Type to avoid updating it
        self.req.environ.pop('CONTENT_TYPE', '')

        # Don't change the owner of the resource by PUT acl request.
        o_resp.object_acl.check_owner(req_acl.owner.id)

        for g in req_acl.grants:
            LOGGER.debug('Grant %s %s permission on the object /%s/%s' %
                         (g.grantee, g.permission,
                          self.req.container_name,
                          self.req.object_name.decode('utf-8')))
        self.req.object_acl = req_acl
    else:
        self._handle_acl(app, self.method)
def PUT(self, req):  # pylint: disable=invalid-name
    """
    Handles PUT Bucket CORS.

    Validates the CorsConfiguration body and stores it verbatim in the
    bucket's CORS sysmeta header.
    """
    log_s3api_command(req, 'put-bucket-cors')
    xml = req.xml(MAX_CORS_BODY_SIZE)
    try:
        data = fromstring(xml, "CorsConfiguration")
    except (XMLSyntaxError, DocumentInvalid):
        raise MalformedXML()
    except Exception as exc:
        # Unexpected parsing failure: log, then propagate unchanged.
        LOGGER.error(exc)
        raise

    # forbid wildcard for ExposeHeader
    check_cors_rule(data)
    req.headers[BUCKET_CORS_HEADER] = xml
    resp = req.get_response(self.app, 'POST')
    return convert_response(req, resp, 204, HTTPOk)
def PUT(self, req):  # pylint: disable=invalid-name
    """
    Handles PUT Bucket CORs.

    Validates the CorsConfiguration body and stores it verbatim in the
    bucket's CORS sysmeta header.
    """
    xml = req.xml(MAX_CORS_BODY_SIZE)
    try:
        data = fromstring(xml, "CorsConfiguration")
    except (XMLSyntaxError, DocumentInvalid):
        raise MalformedXML()
    except Exception as e:
        LOGGER.error(e)
        # Bare 'raise' re-raises with the original traceback; it is
        # equivalent to the Python-2-only three-expression raise form
        # and keeps the code Python-3 compatible.
        raise

    # forbid wildcard for ExposeHeader
    check_cors_rule(data)
    req.headers[BUCKET_CORS_HEADER] = xml
    resp = req._get_response(self.app, 'POST', req.container_name, None)
    return self.convert(req, resp, 204, HTTPOk)
def check_pipeline(self, conf):
    """
    Check that proxy-server.conf has an appropriate pipeline
    for swift3.

    Ensures swift3 precedes proxy-server (and follows iam when present),
    disables multipart support when SLO is missing, and optionally
    validates the auth middleware between swift3 and proxy-server.

    :raises ValueError: the pipeline is invalid for swift3
    """
    if conf.get('__file__', None) is None:
        # Not loaded from a paste config file; nothing to check.
        return

    ctx = loadcontext(loadwsgi.APP, conf.__file__)
    pipeline = str(PipelineWrapper(ctx)).split(' ')

    # Add compatible with 3rd party middleware.
    check_filter_order(pipeline, ['swift3', 'proxy-server'])

    # Filters placed between swift3 and proxy-server.
    auth_pipeline = pipeline[pipeline.index('swift3') + 1:
                             pipeline.index('proxy-server')]

    # Check SLO middleware
    if self.slo_enabled and 'slo' not in auth_pipeline:
        self.slo_enabled = False
        LOGGER.warning('swift3 middleware requires SLO middleware '
                       'to support multi-part upload, please add it '
                       'in pipeline')

    # Check IAM middleware position: when enabled, must be before swift3
    if 'iam' in pipeline:
        check_filter_order(pipeline, ['iam', 'swift3'])

    if not conf.auth_pipeline_check:
        LOGGER.debug('Skip pipeline auth check.')
        return

    if 'tempauth' in auth_pipeline:
        LOGGER.debug('Use tempauth middleware.')
    elif 'keystoneauth' in auth_pipeline:
        check_filter_order(auth_pipeline, ['s3token', 'keystoneauth'])
        LOGGER.debug('Use keystone middleware.')
    elif len(auth_pipeline):
        LOGGER.debug('Use third party(unknown) auth middleware.')
    else:
        raise ValueError('Invalid pipeline %r: expected auth between '
                         'swift3 and proxy-server ' % pipeline)
def fromstring(text, root_tag=None):
    """
    Parse an XML document, strip namespaces, and optionally validate it
    against the RelaxNG schema derived from *root_tag*.

    :param text: raw XML text
    :param root_tag: expected root element name; when given, the matching
                     'schema/<snake_case>.rng' schema is applied
    :returns: the parsed root element
    :raises XMLSyntaxError: the text is not well-formed XML
    :raises DocumentInvalid: the document does not match the schema
    """
    try:
        elem = lxml.etree.fromstring(text, parser)
    except lxml.etree.XMLSyntaxError as e:
        LOGGER.debug(e)
        raise XMLSyntaxError(e)

    cleanup_namespaces(elem)

    if root_tag is not None:
        # validate XML
        try:
            path = 'schema/%s.rng' % camel_to_snake(root_tag)
            with resource_stream(__name__, path) as rng:
                lxml.etree.RelaxNG(file=rng).assertValid(elem)
        except IOError as e:
            # Probably, the schema file doesn't exist.
            LOGGER.error(e)
            # Bare 'raise' re-raises with the original traceback; it is
            # equivalent to the Python-2-only three-expression raise form
            # and keeps the code Python-3 compatible.
            raise
        except lxml.etree.DocumentInvalid as e:
            LOGGER.debug(e)
            raise DocumentInvalid(e)

    return elem
def POST(self, req):
    """
    Handles Delete Multiple Objects.

    Parses the Delete XML body, deletes each listed object, and returns
    a DeleteResult document (errors inline, successes omitted in quiet
    mode).
    """
    def object_key_iter(elem):
        # Yield (key, version) pairs for every <Object> child.
        for obj in elem.iterchildren('Object'):
            key = obj.find('./Key').text
            if not key:
                raise UserKeyMustBeSpecified()
            version = obj.find('./VersionId')
            if version is not None:
                version = version.text

            yield key, version

    try:
        xml = req.xml(MAX_MULTI_DELETE_BODY_SIZE, check_md5=True)
        elem = fromstring(xml, 'Delete')

        quiet = elem.find('./Quiet')
        if quiet is not None and quiet.text.lower() == 'true':
            self.quiet = True
        else:
            self.quiet = False

        delete_list = list(object_key_iter(elem))
        if len(delete_list) > CONF.max_multi_delete_objects:
            raise MalformedXML()
    except (XMLSyntaxError, DocumentInvalid):
        raise MalformedXML()
    except ErrorResponse:
        raise
    except Exception as e:
        LOGGER.error(e)
        # Bare 'raise' re-raises with the original traceback; it is
        # equivalent to the Python-2-only three-expression raise form
        # and keeps the code Python-3 compatible.
        raise

    elem = Element('DeleteResult')

    # check bucket existence
    try:
        req.get_response(self.app, 'HEAD')
    except AccessDenied as error:
        body = self._gen_error_body(error, elem, delete_list)
        return HTTPOk(body=body)

    for key, version in delete_list:
        if version is not None:
            # TODO: delete the specific version of the object
            raise S3NotImplemented()

        req.object_name = key

        try:
            query = req.gen_multipart_manifest_delete_query(self.app)
            req.get_response(self.app, method='DELETE', query=query)
        except ErrorResponse as e:
            # Report per-object failures inline and keep going.
            error = SubElement(elem, 'Error')
            SubElement(error, 'Key').text = key
            SubElement(error, 'Code').text = e.__class__.__name__
            SubElement(error, 'Message').text = e._msg
            continue

        if not self.quiet:
            deleted = SubElement(elem, 'Deleted')
            SubElement(deleted, 'Key').text = key

    body = tostring(elem)

    return HTTPOk(body=body)
def POST(self, req):
    """
    Handles Complete Multipart Upload.

    Verifies the listed parts against the uploaded segments, writes an
    SLO manifest (or the object directly for a single zero-length part),
    cleans up the upload record, and returns the S3 completion document.
    """
    upload_id = req.params['uploadId']
    resp = _get_upload_info(req, self.app, upload_id)

    # Carry user metadata and content type over from the upload record.
    headers = {}
    for key, val in resp.headers.iteritems():
        _key = key.lower()
        if _key.startswith('x-amz-meta-'):
            headers['x-object-meta-' + _key[11:]] = val
        elif _key == 'content-type':
            headers['Content-Type'] = val

    # Query for the objects in the segments area to make sure it completed
    query = {
        'format': 'json',
        'prefix': '%s/%s/' % (req.object_name, upload_id),
        'delimiter': '/'
    }

    container = req.container_name + MULTIUPLOAD_SUFFIX
    resp = req.get_response(self.app, 'GET', container, '', query=query)
    objinfo = json.loads(resp.body)
    # Map segment name -> SLO manifest entry.
    objtable = dict((o['name'],
                     {'path': '/'.join(['', container, o['name']]),
                      'etag': o['hash'],
                      'size_bytes': o['bytes']}) for o in objinfo)

    manifest = []
    previous_number = 0
    try:
        xml = req.xml(MAX_COMPLETE_UPLOAD_BODY_SIZE)
        complete_elem = fromstring(xml, 'CompleteMultipartUpload')
        for part_elem in complete_elem.iterchildren('Part'):
            part_number = int(part_elem.find('./PartNumber').text)

            # Part numbers must be strictly increasing.
            if part_number <= previous_number:
                raise InvalidPartOrder(upload_id=upload_id)
            previous_number = part_number

            etag = part_elem.find('./ETag').text
            if len(etag) >= 2 and etag[0] == '"' and etag[-1] == '"':
                # strip double quotes
                etag = etag[1:-1]

            info = objtable.get("%s/%s/%s" % (req.object_name, upload_id,
                                              part_number))
            if info is None or info['etag'] != etag:
                raise InvalidPart(upload_id=upload_id,
                                  part_number=part_number)

            manifest.append(info)
    except (XMLSyntaxError, DocumentInvalid):
        raise MalformedXML()
    except ErrorResponse:
        raise
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        LOGGER.error(e)
        raise exc_type, exc_value, exc_traceback

    # NOTE(review): 'info' below is the last part seen in the loop;
    # if the XML listed no parts at all this would raise NameError —
    # TODO confirm whether an empty Part list is possible here.
    # Following swift commit 7f636a5, zero-byte segments aren't allowed,
    # even as the final segment
    if int(info['size_bytes']) == 0:
        manifest.pop()

        # Ordinarily, we just let SLO check segment sizes. However, we
        # just popped off a zero-byte segment; if there was a second
        # zero-byte segment and it was at the end, it would succeed on
        # Swift < 2.6.0 and fail on newer Swift. It seems reasonable that
        # it should always fail.
        if manifest and int(manifest[-1]['size_bytes']) == 0:
            raise EntityTooSmall()

    try:
        # TODO: add support for versioning
        if manifest:
            resp = req.get_response(self.app, 'PUT',
                                    body=json.dumps(manifest),
                                    query={'multipart-manifest': 'put'},
                                    headers=headers)
        else:
            # the upload must have consisted of a single zero-length part
            # just write it directly
            resp = req.get_response(self.app, 'PUT', body='',
                                    headers=headers)
    except BadSwiftRequest as e:
        msg = str(e)
        msg_pre_260 = 'Each segment, except the last, must be at least '
        # see https://github.com/openstack/swift/commit/c0866ce
        msg_260 = ('too small; each segment, except the last, must be '
                   'at least ')
        # see https://github.com/openstack/swift/commit/7f636a5
        msg_post_260 = 'too small; each segment must be at least 1 byte'
        if msg.startswith(msg_pre_260) or \
                msg_260 in msg or msg_post_260 in msg:
            # FIXME: AWS S3 allows a smaller object than 5 MB if there is
            # only one part. Use a COPY request to copy the part object
            # from the segments container instead.
            raise EntityTooSmall(msg)
        else:
            raise

    if int(info['size_bytes']) == 0:
        # clean up the zero-byte segment
        empty_seg_cont, empty_seg_name = info['path'].split('/', 2)[1:]
        req.get_response(self.app, 'DELETE',
                         container=empty_seg_cont, obj=empty_seg_name)

    # clean up the multipart-upload record
    obj = '%s/%s' % (req.object_name, upload_id)
    req.get_response(self.app, 'DELETE', container, obj)

    result_elem = Element('CompleteMultipartUploadResult')

    # NOTE: boto with sig v4 appends port to HTTP_HOST value at the
    # request header when the port is non default value and it makes
    # req.host_url like as http://localhost:8080:8080/path
    # that obviously invalid. Probably it should be resolved at
    # swift.common.swob though, tentatively we are parsing and
    # reconstructing the correct host_url info here.
    # in detail, https://github.com/boto/boto/pull/3513
    parsed_url = urlparse(req.host_url)
    host_url = '%s://%s' % (parsed_url.scheme, parsed_url.hostname)
    if parsed_url.port:
        host_url += ':%s' % parsed_url.port

    SubElement(result_elem, 'Location').text = host_url + req.path
    SubElement(result_elem, 'Bucket').text = req.container_name
    SubElement(result_elem, 'Key').text = req.object_name
    SubElement(result_elem, 'ETag').text = resp.etag

    resp.body = tostring(result_elem)
    resp.status = 200
    resp.content_type = "application/xml"

    return resp
def POST(self, req):
    """
    Handles Complete Multipart Upload.

    Verifies the listed parts against the uploaded segments, computes the
    S3-style multipart ETag, writes the SLO manifest, cleans up the
    upload record, and returns the S3 completion document.
    """
    log_s3api_command(req, 'complete-multipart-upload')
    upload_id = req.params['uploadId']
    resp = _get_upload_info(req, self.app, upload_id)

    # Carry user metadata, content type and tagging over from the
    # upload record.
    headers = {}
    for key, val in resp.headers.iteritems():
        _key = key.lower()
        if _key.startswith('x-amz-meta-'):
            headers['x-object-meta-' + _key[11:]] = val
        elif _key == 'content-type':
            headers['Content-Type'] = val

    for key, val in resp.sysmeta_headers.items():
        _key = key.lower()
        if _key == OBJECT_TAGGING_HEADER.lower():
            headers[key] = val

    # Query for the objects in the segments area to make sure it completed
    query = {
        'format': 'json',
        'prefix': '%s/%s/' % (req.object_name, upload_id),
        'delimiter': '/'
    }

    # Force the master to be sure to fetch all uploaded parts
    req.environ.setdefault('oio.query', {})
    req.environ['oio.query']['force_master'] = True

    container = req.container_name + MULTIUPLOAD_SUFFIX
    resp = req.get_response(self.app, 'GET', container, '', query=query)
    objinfo = json.loads(resp.body)
    # Sort by part number (the trailing path component).
    # pylint: disable-msg=no-member
    objinfo.sort(key=lambda o: int(o['name'].split('/')[-1]))

    # Map segment name -> SLO manifest entry.
    objtable = dict((o['name'].encode('utf-8'), {
        'path': '/'.join(['', container, o['name']]),
        'etag': o['hash'],
        'size_bytes': o['bytes']
    }) for o in objinfo)

    s3_etag_hasher = md5()
    manifest = []
    previous_number = 0
    try:
        xml = req.xml(MAX_COMPLETE_UPLOAD_BODY_SIZE)
        if not xml:
            raise InvalidRequest(msg='You must specify at least one part')
        if 'content-md5' in req.headers:
            # If an MD5 was provided, we need to verify it.
            # Note that S3Request already took care of translating to ETag
            if req.headers['etag'] != md5(xml).hexdigest():
                raise BadDigest(content_md5=req.headers['content-md5'])
            # We're only interested in the body here, in the
            # multipart-upload controller -- *don't* let it get
            # plumbed down to the object-server
            del req.headers['etag']

        complete_elem = fromstring(xml, 'CompleteMultipartUpload')
        for part_elem in complete_elem.iterchildren('Part'):
            part_number = int(part_elem.find('./PartNumber').text)

            # Part numbers must be strictly increasing.
            if part_number <= previous_number:
                raise InvalidPartOrder(upload_id=upload_id)
            previous_number = part_number

            etag = part_elem.find('./ETag').text
            if len(etag) >= 2 and etag[0] == '"' and etag[-1] == '"':
                # strip double quotes
                etag = etag[1:-1]

            info = objtable.get("%s/%s/%s" % (req.object_name, upload_id,
                                              part_number))
            if info is None or info['etag'] != etag:
                raise InvalidPart(upload_id=upload_id,
                                  part_number=part_number)

            # S3 multipart ETag = md5 of the concatenated binary part
            # digests, suffixed with the part count.
            s3_etag_hasher.update(binascii.a2b_hex(etag))
            info['size_bytes'] = int(info['size_bytes'])
            manifest.append(info)
    except (XMLSyntaxError, DocumentInvalid):
        raise MalformedXML()
    except ErrorResponse:
        raise
    except Exception as e:
        LOGGER.error(e)
        raise

    s3_etag = '%s-%d' % (s3_etag_hasher.hexdigest(), len(manifest))
    headers[sysmeta_header('object', 'etag')] = s3_etag
    # Leave base header value blank; SLO will populate
    c_etag = '; s3_etag=%s' % s3_etag
    headers['X-Object-Sysmeta-Container-Update-Override-Etag'] = c_etag

    # NOTE(review): 'manifest[-1]' raises IndexError if the XML listed no
    # parts (non-empty body with zero Part elements) — TODO confirm that
    # schema validation rules this out.
    # Following swift commit 7f636a5, zero-byte segments aren't allowed,
    # even as the final segment
    empty_seg = None
    if manifest[-1]['size_bytes'] == 0:
        empty_seg = manifest.pop()

        # We'll check the sizes of all except the last segment below, but
        # since we just popped off a zero-byte segment, we should check
        # that last segment, too.
        if manifest and manifest[-1]['size_bytes'] < CONF.min_segment_size:
            raise EntityTooSmall()

    # Check the size of each segment except the last and make sure they are
    # all more than the minimum upload chunk size
    for info in manifest[:-1]:
        if info['size_bytes'] < CONF.min_segment_size:
            raise EntityTooSmall()

    try:
        # TODO: add support for versioning
        if manifest:
            resp = req.get_response(self.app, 'PUT',
                                    body=json.dumps(manifest),
                                    query={'multipart-manifest': 'put'},
                                    headers=headers)
        else:
            # the upload must have consisted of a single zero-length part
            # just write it directly
            resp = req.get_response(self.app, 'PUT', body='',
                                    headers=headers)
    except ErrorResponse as e:
        msg = str(e._msg)
        expected_msg = 'too small; each segment must be at least 1 byte'
        if expected_msg in msg:
            # FIXME: AWS S3 allows a smaller object than 5 MB if there is
            # only one part. Use a COPY request to copy the part object
            # from the segments container instead.
            raise EntityTooSmall(msg)
        else:
            raise

    if empty_seg:
        # clean up the zero-byte segment
        _, empty_seg_cont, empty_seg_name = empty_seg['path'].split('/', 2)
        req.get_response(self.app, 'DELETE',
                         container=empty_seg_cont, obj=empty_seg_name)

    # clean up the multipart-upload record
    obj = '%s/%s' % (req.object_name, upload_id)
    req.environ['oio.ephemeral_object'] = True
    req.get_response(self.app, 'DELETE', container, obj)

    result_elem = Element('CompleteMultipartUploadResult')

    # NOTE: boto with sig v4 appends port to HTTP_HOST value at the
    # request header when the port is non default value and it makes
    # req.host_url like as http://localhost:8080:8080/path
    # that obviously invalid. Probably it should be resolved at
    # swift.common.swob though, tentatively we are parsing and
    # reconstructing the correct host_url info here.
    # in detail, https://github.com/boto/boto/pull/3513
    parsed_url = urlparse(req.host_url)
    host_url = '%s://%s' % (parsed_url.scheme, parsed_url.hostname)
    if parsed_url.port:
        host_url += ':%s' % parsed_url.port

    SubElement(result_elem, 'Location').text = host_url + req.path
    SubElement(result_elem, 'Bucket').text = req.container_name
    SubElement(result_elem, 'Key').text = req.object_name
    SubElement(result_elem, 'ETag').text = '"%s"' % s3_etag
    # The SLO PUT's own ETag is superseded by the S3 multipart ETag.
    del resp.headers['ETag']

    resp.body = tostring(result_elem)
    resp.status = 200
    resp.content_type = "application/xml"

    return resp
def POST(self, req):
    """
    Handles Complete Multipart Upload.

    Flow: look up the in-progress upload, carry user metadata and
    content type over to the final object, list the uploaded segment
    objects, validate the client's <Part> list against them, PUT an
    SLO-style manifest, delete the upload marker, and return the
    S3 CompleteMultipartUploadResult XML.

    :param req: the S3 request object (project type) for this upload
    :returns: the swob-style response carrying the result XML
    :raises InvalidRequest: if the request body is empty
    :raises MalformedXML: if the body is not valid CompleteMultipartUpload
    :raises InvalidPartOrder, InvalidPart: on bad part list entries
    :raises EntityTooSmall: if a non-final segment is under the minimum
    """
    upload_id = req.params['uploadId']
    resp = _get_upload_info(req, self.app, upload_id)
    # Copy the user metadata recorded on the upload marker onto the
    # final object, translating the S3 prefix to the Swift one.
    headers = {}
    for key, val in resp.headers.iteritems():
        _key = key.lower()
        if _key.startswith('x-amz-meta-'):
            headers['x-object-meta-' + _key[11:]] = val
    # Decide the Content-Type for the completed object.  The
    # 'has-content-type' sysmeta distinguishes "explicitly set at
    # initiate time" from "never set".
    hct_header = sysmeta_header('object', 'has-content-type')
    if resp.sysmeta_headers.get(hct_header) == 'yes':
        content_type = resp.sysmeta_headers.get(
            sysmeta_header('object', 'content-type'))
    elif hct_header in resp.sysmeta_headers:
        # has-content-type is present but false, so no content type was
        # set on initial upload. In that case, we won't set one on our
        # PUT request. Swift will end up guessing one based on the
        # object name.
        content_type = None
    else:
        content_type = resp.headers.get('Content-Type')
    if content_type:
        headers['Content-Type'] = content_type
    # Query for the objects in the segments area to make sure it completed
    query = {
        'format': 'json',
        'prefix': '%s/%s/' % (req.object_name, upload_id),
        'delimiter': '/'
    }
    container = req.container_name + MULTIUPLOAD_SUFFIX
    resp = req.get_response(self.app, 'GET', container, '', query=query)
    objinfo = json.loads(resp.body)
    # Index the segment listing by object name so each <Part> can be
    # matched in O(1).
    objtable = dict((o['name'],
                     {'path': '/'.join(['', container, o['name']]),
                      'etag': o['hash'],
                      'size_bytes': o['bytes']}) for o in objinfo)
    manifest = []
    previous_number = 0
    try:
        xml = req.xml(MAX_COMPLETE_UPLOAD_BODY_SIZE)
        if not xml:
            raise InvalidRequest(msg='You must specify at least one part')
        complete_elem = fromstring(xml, 'CompleteMultipartUpload')
        for part_elem in complete_elem.iterchildren('Part'):
            part_number = int(part_elem.find('./PartNumber').text)
            # Part numbers must be strictly increasing.
            if part_number <= previous_number:
                raise InvalidPartOrder(upload_id=upload_id)
            previous_number = part_number
            etag = part_elem.find('./ETag').text
            if len(etag) >= 2 and etag[0] == '"' and etag[-1] == '"':
                # strip double quotes
                etag = etag[1:-1]
            info = objtable.get("%s/%s/%s" % (req.object_name, upload_id,
                                              part_number))
            # The part must exist in the segments container and its
            # ETag must match what the client claims.
            if info is None or info['etag'] != etag:
                raise InvalidPart(upload_id=upload_id,
                                  part_number=part_number)
            info['size_bytes'] = int(info['size_bytes'])
            manifest.append(info)
    except (XMLSyntaxError, DocumentInvalid):
        raise MalformedXML()
    except ErrorResponse:
        raise
    except Exception as e:
        # Log, then re-raise with the original traceback (Python 2
        # three-expression raise form).
        exc_type, exc_value, exc_traceback = sys.exc_info()
        LOGGER.error(e)
        raise exc_type, exc_value, exc_traceback
    # Following swift commit 7f636a5, zero-byte segments aren't allowed,
    # even as the final segment
    # NOTE(review): manifest[-1] raises IndexError if the XML contained
    # no <Part> elements; presumably fromstring's schema validation
    # rejects that earlier -- confirm.
    empty_seg = None
    if manifest[-1]['size_bytes'] == 0:
        empty_seg = manifest.pop()
        # We'll check the sizes of all except the last segment below, but
        # since we just popped off a zero-byte segment, we should check
        # that last segment, too.
        if manifest and manifest[-1]['size_bytes'] < CONF.min_segment_size:
            raise EntityTooSmall()
    # Check the size of each segment except the last and make sure they are
    # all more than the minimum upload chunk size
    for info in manifest[:-1]:
        if info['size_bytes'] < CONF.min_segment_size:
            raise EntityTooSmall()
    try:
        # TODO: add support for versioning
        if manifest:
            resp = req.get_response(self.app, 'PUT',
                                    body=json.dumps(manifest),
                                    query={'multipart-manifest': 'put'},
                                    headers=headers)
        else:
            # the upload must have consisted of a single zero-length part
            # just write it directly
            resp = req.get_response(self.app, 'PUT', body='',
                                    headers=headers)
    except BadSwiftRequest as e:
        msg = str(e)
        expected_msg = 'too small; each segment must be at least 1 byte'
        if expected_msg in msg:
            # FIXME: AWS S3 allows a smaller object than 5 MB if there is
            # only one part. Use a COPY request to copy the part object
            # from the segments container instead.
            raise EntityTooSmall(msg)
        else:
            raise
    if empty_seg:
        # clean up the zero-byte segment
        _, empty_seg_cont, empty_seg_name = empty_seg['path'].split('/', 2)
        req.get_response(self.app, 'DELETE',
                         container=empty_seg_cont, obj=empty_seg_name)
    # clean up the multipart-upload record
    obj = '%s/%s' % (req.object_name, upload_id)
    try:
        req.get_response(self.app, 'DELETE', container, obj)
    except NoSuchKey:
        pass  # We know that this existed long enough for us to HEAD
    result_elem = Element('CompleteMultipartUploadResult')
    # NOTE: boto with sig v4 appends port to HTTP_HOST value at the
    # request header when the port is non default value and it makes
    # req.host_url like as http://localhost:8080:8080/path
    # that obviously invalid. Probably it should be resolved at
    # swift.common.swob though, tentatively we are parsing and
    # reconstructing the correct host_url info here.
    # in detail, https://github.com/boto/boto/pull/3513
    parsed_url = urlparse(req.host_url)
    host_url = '%s://%s' % (parsed_url.scheme, parsed_url.hostname)
    if parsed_url.port:
        host_url += ':%s' % parsed_url.port
    SubElement(result_elem, 'Location').text = host_url + req.path
    SubElement(result_elem, 'Bucket').text = req.container_name
    SubElement(result_elem, 'Key').text = req.object_name
    SubElement(result_elem, 'ETag').text = resp.etag
    resp.body = tostring(result_elem)
    resp.status = 200
    resp.content_type = "application/xml"
    return resp
def __call__(self, env, start_response):
    """
    WSGI entry point: rewrite JSON container listings so that any
    's3_etag' parameter embedded in an item's 'hash' value is promoted
    to its own quoted 's3_etag' key.

    On anything other than a small JSON container-listing GET response,
    the original response is passed through untouched.
    """
    # a lot of this is cribbed from listing_formats / swob.Request
    if env['REQUEST_METHOD'] != 'GET':
        # Nothing to translate
        return self.app(env, start_response)
    try:
        # A container listing path has exactly three components:
        # version / account / container.
        v, a, c = split_path(
            env.get('SCRIPT_NAME', '') + env['PATH_INFO'], 3, 3)
        if not valid_api_version(v):
            raise ValueError
    except ValueError:
        # not a container request; pass through
        return self.app(env, start_response)
    ctx = WSGIContext(self.app)
    resp_iter = ctx._app_call(env)
    # Scan the response headers for Content-Type and Content-Length,
    # remembering the index of Content-Length so it can be patched
    # after the body is rewritten.  Either branch breaks early once
    # both values have been seen.
    content_type = content_length = cl_index = None
    for index, (header, value) in enumerate(ctx._response_headers):
        header = header.lower()
        if header == 'content-type':
            content_type = value.split(';', 1)[0].strip()
            if content_length:
                break
        elif header == 'content-length':
            cl_index = index
            try:
                content_length = int(value)
            except ValueError:
                pass  # ignore -- we'll bail later
            if content_type:
                break
    if content_type != 'application/json' or content_length is None:
        # Not a JSON listing (or length unknown): pass through.
        start_response(ctx._response_status, ctx._response_headers,
                       ctx._response_exc_info)
        return resp_iter
    if content_length > MAX_CONTAINER_LISTING_CONTENT_LENGTH:
        # Too big to buffer safely; pass through unchanged.
        LOGGER.warn(
            'The content length (%d) of the listing is too long (max=%d)',
            content_length, MAX_CONTAINER_LISTING_CONTENT_LENGTH)
        start_response(ctx._response_status, ctx._response_headers,
                       ctx._response_exc_info)
        return resp_iter
    # We've done our sanity checks, slurp the response into memory
    with closing_if_possible(resp_iter):
        body = b''.join(resp_iter)
    try:
        listing = json.loads(body)
        for item in listing:
            if 'subdir' in item:
                # Delimiter placeholders carry no hash; skip them.
                continue
            # 'hash' may carry MIME-style parameters; pull out s3_etag
            # and re-serialize whatever parameters remain.
            value, params = parse_header(item['hash'])
            if 's3_etag' in params:
                item['s3_etag'] = '"%s"' % params.pop('s3_etag')
                item['hash'] = value + ''.join(
                    '; %s=%s' % kv for kv in params.items())
    except (TypeError, KeyError, ValueError):
        # If anything goes wrong above, drop back to original response
        start_response(ctx._response_status, ctx._response_headers,
                       ctx._response_exc_info)
        return [body]
    body = json.dumps(listing)
    # The rewritten body may differ in length; update Content-Length
    # in place (cl_index is guaranteed set -- otherwise content_length
    # would be None and we'd have returned above).
    ctx._response_headers[cl_index] = (
        ctx._response_headers[cl_index][0],
        str(len(body)),
    )
    start_response(ctx._response_status, ctx._response_headers,
                   ctx._response_exc_info)
    return [body]
def POST(self, req):
    """
    Handles Complete Multipart Upload.

    Older variant: copies user metadata and Content-Type straight from
    the upload-marker response, validates the <Part> list against the
    segments container, PUTs an SLO-style manifest, deletes the upload
    marker, and returns the CompleteMultipartUploadResult XML.

    :param req: the S3 request object (project type) for this upload
    :returns: the response carrying the result XML
    :raises MalformedXML: if the body is not valid CompleteMultipartUpload
    :raises InvalidPartOrder, InvalidPart: on bad part list entries
    :raises EntityTooSmall: when the backend rejects undersized segments
    """
    upload_id = req.params['uploadId']
    resp = _get_upload_info(req, self.app, upload_id)
    # Carry user metadata and content type from the upload marker onto
    # the final object.
    headers = {}
    for key, val in resp.headers.iteritems():
        _key = key.lower()
        if _key.startswith('x-amz-meta-'):
            headers['x-object-meta-' + _key[11:]] = val
        elif _key == 'content-type':
            headers['Content-Type'] = val
    # Query for the objects in the segments area to make sure it completed
    query = {
        'format': 'json',
        'prefix': '%s/%s/' % (req.object_name, upload_id),
        'delimiter': '/'
    }
    container = req.container_name + MULTIUPLOAD_SUFFIX
    resp = req.get_response(self.app, 'GET', container, '', query=query)
    objinfo = json.loads(resp.body)
    # Index the segment listing by name for O(1) part lookup.
    objtable = dict((o['name'],
                     {'path': '/'.join(['', container, o['name']]),
                      'etag': o['hash'],
                      'size_bytes': o['bytes']}) for o in objinfo)
    manifest = []
    previous_number = 0
    try:
        xml = req.xml(MAX_COMPLETE_UPLOAD_BODY_SIZE)
        complete_elem = fromstring(xml, 'CompleteMultipartUpload')
        for part_elem in complete_elem.iterchildren('Part'):
            part_number = int(part_elem.find('./PartNumber').text)
            # Part numbers must be strictly increasing.
            if part_number <= previous_number:
                raise InvalidPartOrder(upload_id=upload_id)
            previous_number = part_number
            etag = part_elem.find('./ETag').text
            if len(etag) >= 2 and etag[0] == '"' and etag[-1] == '"':
                # strip double quotes
                etag = etag[1:-1]
            info = objtable.get("%s/%s/%s" % (req.object_name, upload_id,
                                              part_number))
            # The part must exist and the client's ETag must match.
            if info is None or info['etag'] != etag:
                raise InvalidPart(upload_id=upload_id,
                                  part_number=part_number)
            manifest.append(info)
    except (XMLSyntaxError, DocumentInvalid):
        raise MalformedXML()
    except ErrorResponse:
        raise
    except Exception as e:
        # Log, then re-raise with the original traceback (Python 2
        # three-expression raise form).
        exc_type, exc_value, exc_traceback = sys.exc_info()
        LOGGER.error(e)
        raise exc_type, exc_value, exc_traceback
    try:
        # TODO: add support for versioning
        resp = req.get_response(self.app, 'PUT', body=json.dumps(manifest),
                                query={'multipart-manifest': 'put'},
                                headers=headers)
    except BadSwiftRequest as e:
        # Map the backend's minimum-segment-size complaint to the S3
        # error; the match is on the backend's message prefix.
        msg = str(e)
        if msg.startswith('Each segment, except the last, '
                          'must be at least '):
            # FIXME: AWS S3 allows a smaller object than 5 MB if there is
            # only one part. Use a COPY request to copy the part object
            # from the segments container instead.
            raise EntityTooSmall(msg)
        else:
            raise
    # clean up the multipart-upload record
    obj = '%s/%s' % (req.object_name, upload_id)
    req.get_response(self.app, 'DELETE', container, obj)
    result_elem = Element('CompleteMultipartUploadResult')
    SubElement(result_elem, 'Location').text = req.host_url + req.path
    SubElement(result_elem, 'Bucket').text = req.container_name
    SubElement(result_elem, 'Key').text = req.object_name
    SubElement(result_elem, 'ETag').text = resp.etag
    resp.body = tostring(result_elem)
    resp.status = 200
    resp.content_type = "application/xml"
    return resp
def POST(self, req):
    """
    Handles Delete Multiple Objects.

    Parses the <Delete> request body, deletes each listed key, and
    returns a <DeleteResult> document.  In quiet mode only failures
    are reported, matching S3 semantics.

    :param req: the S3 request object (project type)
    :returns: an HTTPOk response carrying the DeleteResult XML
    :raises UserKeyMustBeSpecified: if any <Object> has an empty <Key>
    :raises MalformedXML: on invalid XML or too many objects
    :raises S3NotImplemented: if a specific VersionId is requested
    """
    def object_key_iter(elem):
        # Yield (key, version) for every <Object>; version is None
        # when no <VersionId> element is present.
        for obj in elem.iterchildren('Object'):
            key = obj.find('./Key').text
            if not key:
                raise UserKeyMustBeSpecified()
            version = obj.find('./VersionId')
            if version is not None:
                version = version.text
            yield key, version

    # check bucket permission
    if CONF.s3_acl:
        req.get_response(self.app, 'HEAD')

    try:
        xml = req.xml(MAX_MULTI_DELETE_BODY_SIZE, check_md5=True)
        elem = fromstring(xml, 'Delete')

        quiet = elem.find('./Quiet')
        # Bug fix: an empty <Quiet/> element has .text == None, and the
        # old `quiet.text.lower()` raised AttributeError (surfacing as
        # an internal error).  Treat a missing or empty element, or any
        # value other than 'true', as not-quiet.
        self.quiet = (quiet is not None and quiet.text is not None and
                      quiet.text.lower() == 'true')

        delete_list = list(object_key_iter(elem))
        if len(delete_list) > CONF.max_multi_delete_objects:
            raise MalformedXML()
    except (XMLSyntaxError, DocumentInvalid):
        raise MalformedXML()
    except ErrorResponse:
        raise
    except Exception as e:
        LOGGER.error(e)
        raise

    elem = Element('DeleteResult')
    for key, version in delete_list:
        if version is not None:
            # TODO: delete the specific version of the object
            raise S3NotImplemented()
        req.object_name = key
        try:
            req.get_response(self.app, method='DELETE')
        except NoSuchKey:
            # S3 reports a missing key as successfully deleted.
            pass
        except ErrorResponse as e:
            # Record the failure for this key and keep going.
            error = SubElement(elem, 'Error')
            SubElement(error, 'Key').text = key
            SubElement(error, 'Code').text = e.__class__.__name__
            SubElement(error, 'Message').text = e._msg
            continue
        if not self.quiet:
            deleted = SubElement(elem, 'Deleted')
            SubElement(deleted, 'Key').text = key
    body = tostring(elem)
    return HTTPOk(body=body)
def POST(self, req):
    """
    Handles Complete Multipart Upload.

    Variant that also computes the S3-style composite ETag
    ("<md5 of part md5s>-<part count>"), stashes it as an 's3_etag'
    parameter on the object's Content-Type, and reports it in the
    result XML instead of the backend ETag.

    :param req: the S3 request object (project type) for this upload
    :returns: the response carrying the result XML
    :raises MalformedXML: if the body is not valid CompleteMultipartUpload
    :raises InvalidPartOrder, InvalidPart: on bad part list entries
    :raises EntityTooSmall: if a non-final segment is under the minimum
    """
    upload_id = req.params['uploadId']
    resp = _get_upload_info(req, self.app, upload_id)
    # Carry user metadata and content type from the upload marker onto
    # the final object.
    headers = {}
    for key, val in resp.headers.iteritems():
        _key = key.lower()
        if _key.startswith('x-amz-meta-'):
            headers['x-object-meta-' + _key[11:]] = val
        elif _key == 'content-type':
            headers['Content-Type'] = val
    # Query for the objects in the segments area to make sure it completed
    query = {
        'format': 'json',
        'prefix': '%s/%s/' % (req.object_name, upload_id),
        'delimiter': '/'
    }
    container = req.container_name + MULTIUPLOAD_SUFFIX
    resp = req.get_response(self.app, 'GET', container, '', query=query)
    objinfo = json.loads(resp.body)
    # Index the segment listing by name for O(1) part lookup.
    objtable = dict((o['name'], {
        'path': '/'.join(['', container, o['name']]),
        'etag': o['hash'],
        'size_bytes': o['bytes']
    }) for o in objinfo)
    # Compute the S3 multipart ETag: md5 over the binary concatenation
    # of each part's md5, suffixed with the part count.
    etag_hash = md5()
    for obj in objinfo:
        etag_hash.update(unhexlify(obj['hash']))
    s3_etag = "%s-%d" % (etag_hash.hexdigest(), len(objinfo))
    # NOTE(review): this KeyErrors if the upload marker carried no
    # content-type header -- presumably one is always recorded at
    # initiate time; confirm against the initiate handler.
    headers['Content-Type'] += ";s3_etag=%s" % s3_etag
    manifest = []
    previous_number = 0
    try:
        xml = req.xml(MAX_COMPLETE_UPLOAD_BODY_SIZE)
        complete_elem = fromstring(xml, 'CompleteMultipartUpload')
        for part_elem in complete_elem.iterchildren('Part'):
            part_number = int(part_elem.find('./PartNumber').text)
            # Part numbers must be strictly increasing.
            if part_number <= previous_number:
                raise InvalidPartOrder(upload_id=upload_id)
            previous_number = part_number
            etag = part_elem.find('./ETag').text
            if len(etag) >= 2 and etag[0] == '"' and etag[-1] == '"':
                # strip double quotes
                etag = etag[1:-1]
            info = objtable.get("%s/%s/%s" % (req.object_name, upload_id,
                                              part_number))
            # The part must exist and the client's ETag must match.
            if info is None or info['etag'] != etag:
                raise InvalidPart(upload_id=upload_id,
                                  part_number=part_number)
            info['size_bytes'] = int(info['size_bytes'])
            manifest.append(info)
    except (XMLSyntaxError, DocumentInvalid):
        raise MalformedXML()
    except ErrorResponse:
        raise
    except Exception as e:
        # Log, then re-raise with the original traceback (Python 2
        # three-expression raise form).
        exc_type, exc_value, exc_traceback = sys.exc_info()
        LOGGER.error(e)
        raise exc_type, exc_value, exc_traceback
    # Following swift commit 7f636a5, zero-byte segments aren't allowed,
    # even as the final segment
    # NOTE(review): manifest[-1] raises IndexError if the XML contained
    # no <Part> elements; presumably schema validation rejects that
    # earlier -- confirm.
    empty_seg = None
    if manifest[-1]['size_bytes'] == 0:
        empty_seg = manifest.pop()
        # We'll check the sizes of all except the last segment below, but
        # since we just popped off a zero-byte segment, we should check
        # that last segment, too.
        if manifest and manifest[-1]['size_bytes'] < CONF.min_segment_size:
            raise EntityTooSmall()
    # Check the size of each segment except the last and make sure they are
    # all more than the minimum upload chunk size
    for info in manifest[:-1]:
        if info['size_bytes'] < CONF.min_segment_size:
            raise EntityTooSmall()
    try:
        # TODO: add support for versioning
        if manifest:
            resp = req.get_response(self.app, 'PUT',
                                    body=json.dumps(manifest),
                                    query={'multipart-manifest': 'put'},
                                    headers=headers)
        else:
            # the upload must have consisted of a single zero-length part
            # just write it directly
            resp = req.get_response(self.app, 'PUT', body='',
                                    headers=headers)
    except BadSwiftRequest as e:
        msg = str(e)
        expected_msg = 'too small; each segment must be at least 1 byte'
        if expected_msg in msg:
            # FIXME: AWS S3 allows a smaller object than 5 MB if there is
            # only one part. Use a COPY request to copy the part object
            # from the segments container instead.
            raise EntityTooSmall(msg)
        else:
            raise
    if empty_seg:
        # clean up the zero-byte segment
        _, empty_seg_cont, empty_seg_name = empty_seg['path'].split('/', 2)
        req.get_response(self.app, 'DELETE',
                         container=empty_seg_cont, obj=empty_seg_name)
    # clean up the multipart-upload record
    obj = '%s/%s' % (req.object_name, upload_id)
    req.get_response(self.app, 'DELETE', container, obj)
    result_elem = Element('CompleteMultipartUploadResult')
    # NOTE: boto with sig v4 appends port to HTTP_HOST value at the
    # request header when the port is non default value and it makes
    # req.host_url like as http://localhost:8080:8080/path
    # that obviously invalid. Probably it should be resolved at
    # swift.common.swob though, tentatively we are parsing and
    # reconstructing the correct host_url info here.
    # in detail, https://github.com/boto/boto/pull/3513
    parsed_url = urlparse(req.host_url)
    host_url = '%s://%s' % (parsed_url.scheme, parsed_url.hostname)
    if parsed_url.port:
        host_url += ':%s' % parsed_url.port
    SubElement(result_elem, 'Location').text = host_url + req.path
    SubElement(result_elem, 'Bucket').text = req.container_name
    SubElement(result_elem, 'Key').text = req.object_name
    # Report the composite s3_etag, not the manifest's backend ETag.
    SubElement(result_elem, 'ETag').text = '"%s"' % s3_etag
    del resp.headers['ETag']
    resp.body = tostring(result_elem)
    resp.status = 200
    resp.content_type = "application/xml"
    return resp
def POST(self, req):
    """
    Handles Complete Multipart Upload.

    Validates the client's part list against the segment objects that
    were actually uploaded, writes a manifest covering them, removes
    the in-progress upload marker, and returns the S3-style
    CompleteMultipartUploadResult XML.
    """
    upload_id = req.params['uploadId']
    _check_upload_info(req, self.app, upload_id)

    # List the uploaded segment objects so each <Part> in the request
    # body can be matched by name and ETag.
    seg_container = req.container_name + '+segments'
    listing_query = {
        'format': 'json',
        'prefix': '%s/%s/' % (req.object_name, upload_id),
        'delimiter': '/'
    }
    resp = req.get_response(self.app, 'GET', seg_container, '',
                            query=listing_query)
    segments_by_name = {}
    for seg in loads(resp.body):
        segments_by_name[seg['name']] = {
            'path': '/'.join(['', seg_container, seg['name']]),
            'etag': seg['hash'],
            'size_bytes': seg['bytes'],
        }

    manifest = []
    last_number = 0
    try:
        xml = req.xml(MAX_COMPLETE_UPLOAD_BODY_SIZE)
        complete_elem = fromstring(xml, 'CompleteMultipartUpload')
        for part_elem in complete_elem.iterchildren('Part'):
            part_number = int(part_elem.find('./PartNumber').text)
            # Part numbers must be strictly ascending.
            if part_number <= last_number:
                raise InvalidPartOrder(upload_id=upload_id)
            last_number = part_number
            etag = part_elem.find('./ETag').text
            is_quoted = (len(etag) >= 2 and
                         etag[0] == '"' and etag[-1] == '"')
            if is_quoted:
                # strip double quotes
                etag = etag[1:-1]
            info = segments_by_name.get(
                "%s/%s/%s" % (req.object_name, upload_id, part_number))
            # The part must exist and its recorded ETag must match.
            if info is None or info['etag'] != etag:
                raise InvalidPart(upload_id=upload_id,
                                  part_number=part_number)
            manifest.append(info)
    except (XMLSyntaxError, DocumentInvalid):
        raise MalformedXML()
    except ErrorResponse:
        raise
    except Exception as e:
        LOGGER.error(e)
        raise

    try:
        # TODO: add support for versioning
        resp = req.get_response(self.app, 'PUT', body=dumps(manifest),
                                query={'multipart-manifest': 'put'})
    except BadSwiftRequest as e:
        msg = str(e)
        if not msg.startswith('Each segment, except the last, '
                              'must be at least '):
            raise
        # FIXME: AWS S3 allows a smaller object than 5 MB if there is
        # only one part. Use a COPY request to copy the part object
        # from the segments container instead.
        raise EntityTooSmall(msg)

    # Drop the in-progress upload marker now that the manifest exists.
    marker = '%s/%s' % (req.object_name, upload_id)
    req.get_response(self.app, 'DELETE', seg_container, marker)

    result_elem = Element('CompleteMultipartUploadResult')
    SubElement(result_elem, 'Location').text = req.host_url + req.path
    SubElement(result_elem, 'Bucket').text = req.container_name
    SubElement(result_elem, 'Key').text = req.object_name
    SubElement(result_elem, 'ETag').text = resp.etag
    resp.body = tostring(result_elem)
    resp.status = 200
    resp.content_type = "application/xml"
    return resp