def test_object_multi_DELETE(self):
    self.swift.register('DELETE', '/v1/AUTH_test/bucket/Key1',
                        swob.HTTPNoContent, {}, None)
    self.swift.register('DELETE', '/v1/AUTH_test/bucket/Key2',
                        swob.HTTPNotFound, {}, None)

    elem = Element('Delete')
    for key in ['Key1', 'Key2']:
        obj = SubElement(elem, 'Object')
        SubElement(obj, 'Key').text = key
    body = tostring(elem, use_s3ns=False)
    content_md5 = md5(body).digest().encode('base64').strip()

    req = Request.blank('/bucket?delete',
                        environ={'REQUEST_METHOD': 'POST'},
                        headers={'Authorization': 'AWS test:tester:hmac',
                                 'Content-MD5': content_md5},
                        body=body)
    req.date = datetime.now()
    req.content_type = 'text/plain'
    status, headers, body = self.call_swift3(req)
    self.assertEquals(status.split()[0], '200')

    elem = fromstring(body)
    self.assertEquals(len(elem.findall('Deleted')), 2)
def elem(self): """ Create an etree element. """ elem = Element('Grant') elem.append(self.grantee.elem()) SubElement(elem, 'Permission').text = self.permission return elem
def test_grant_invalid_group_xml(self):
    grantee = Element('Grantee', nsmap={'xsi': XMLNS_XSI})
    grantee.set('{%s}type' % XMLNS_XSI, 'Invalid')
    xml = _make_xml(grantee=grantee)
    req = Request.blank('/bucket/object?acl',
                        environ={'REQUEST_METHOD': 'PUT'},
                        headers={'Authorization': 'AWS test:tester:hmac'},
                        body=xml)
    status, headers, body = self.call_swift3(req)
    self.assertEquals(self._get_error_code(body), 'MalformedACLError')
def test_grant_email_xml(self):
    grantee = Element('Grantee', nsmap={'xsi': XMLNS_XSI})
    grantee.set('{%s}type' % XMLNS_XSI, 'AmazonCustomerByEmail')
    SubElement(grantee, 'EmailAddress').text = '*****@*****.**'
    xml = _make_xml(grantee=grantee)
    req = Request.blank('/bucket/object?acl',
                        environ={'REQUEST_METHOD': 'PUT'},
                        headers={'Authorization': 'AWS test:tester:hmac'},
                        body=xml)
    status, headers, body = self.call_swift3(req)
    self.assertEquals(self._get_error_code(body), 'NotImplemented')
def GET(self, req): """ Handles GET Bucket location. """ req.get_response(self.app, method='HEAD') elem = Element('LocationConstraint') if CONF.location != 'US': elem.text = CONF.location body = tostring(elem) return HTTPOk(body=body, content_type='application/xml')
def test_grant_invalid_uri_xml(self):
    grantee = Element('Grantee', nsmap={'xsi': XMLNS_XSI})
    grantee.set('{%s}type' % XMLNS_XSI, 'Group')
    SubElement(grantee, 'URI').text = 'invalid'
    xml = _make_xml(grantee)
    req = Request.blank('/bucket/object?acl',
                        environ={'REQUEST_METHOD': 'PUT'},
                        headers={'Authorization': 'AWS test:tester:hmac',
                                 'Date': self.get_date_header()},
                        body=xml)
    status, headers, body = self.call_swift3(req)
    self.assertEquals(self._get_error_code(body), 'InvalidArgument')
def test_object_multi_DELETE_quiet(self):
    self.swift.register('DELETE', '/v1/AUTH_test/bucket/Key1',
                        swob.HTTPNoContent, {}, None)
    self.swift.register('DELETE', '/v1/AUTH_test/bucket/Key2',
                        swob.HTTPNotFound, {}, None)

    elem = Element('Delete')
    SubElement(elem, 'Quiet').text = 'true'
    for key in ['Key1', 'Key2']:
        obj = SubElement(elem, 'Object')
        SubElement(obj, 'Key').text = key
    body = tostring(elem, use_s3ns=False)
    content_md5 = md5(body).digest().encode('base64').strip()

    req = Request.blank('/bucket?delete',
                        environ={'REQUEST_METHOD': 'POST'},
                        headers={'Authorization': 'AWS test:tester:hmac',
                                 'Content-MD5': content_md5},
                        body=body)
    status, headers, body = self.call_swift3(req)
    self.assertEquals(status.split()[0], '200')

    elem = fromstring(body)
    self.assertEquals(len(elem.findall('Deleted')), 0)
def test_object_multi_DELETE(self):
    self.swift.register('HEAD', '/v1/AUTH_test/bucket/Key3',
                        swob.HTTPOk, {'x-static-large-object': 'True'}, None)
    self.swift.register('DELETE', '/v1/AUTH_test/bucket/Key1',
                        swob.HTTPNoContent, {}, None)
    self.swift.register('DELETE', '/v1/AUTH_test/bucket/Key2',
                        swob.HTTPNotFound, {}, None)
    self.swift.register('DELETE', '/v1/AUTH_test/bucket/Key3',
                        swob.HTTPOk, {}, None)

    elem = Element('Delete')
    for key in ['Key1', 'Key2', 'Key3']:
        obj = SubElement(elem, 'Object')
        SubElement(obj, 'Key').text = key
    body = tostring(elem, use_s3ns=False)
    content_md5 = md5(body).digest().encode('base64').strip()

    req = Request.blank('/bucket?delete',
                        environ={'REQUEST_METHOD': 'POST'},
                        headers={'Authorization': 'AWS test:tester:hmac',
                                 'Date': self.get_date_header(),
                                 'Content-MD5': content_md5},
                        body=body)
    req.date = datetime.now()
    req.content_type = 'text/plain'
    status, headers, body = self.call_swift3(req)
    self.assertEquals(status.split()[0], '200')

    elem = fromstring(body)
    self.assertEquals(len(elem.findall('Deleted')), 3)

    _, path, _ = self.swift.calls_with_headers[-1]
    path, query_string = path.split('?', 1)
    self.assertEquals(path, '/v1/AUTH_test/bucket/Key3')
    query = dict(urllib.parse.parse_qsl(query_string))
    self.assertEquals(query['multipart-manifest'], 'delete')
def elem(self):
    elem = Element('Grantee', nsmap={'xsi': XMLNS_XSI})
    elem.set('{%s}type' % XMLNS_XSI, self.type)
    SubElement(elem, 'URI').text = self.uri
    return elem
def GET(self, req): """ Handle GET Bucket (List Objects) request """ max_keys = req.get_validated_param('max-keys', CONF.max_bucket_listing) # TODO: Separate max_bucket_listing and default_bucket_listing tag_max_keys = max_keys max_keys = min(max_keys, CONF.max_bucket_listing) encoding_type = req.params.get('encoding-type') if encoding_type is not None and encoding_type != 'url': err_msg = 'Invalid Encoding Method specified in Request' raise InvalidArgument('encoding-type', encoding_type, err_msg) query = { 'format': 'json', 'limit': max_keys + 1, } if 'marker' in req.params: query.update({'marker': req.params['marker']}) if 'prefix' in req.params: query.update({'prefix': req.params['prefix']}) if 'delimiter' in req.params: query.update({'delimiter': req.params['delimiter']}) # GET Bucket (List Objects) Version 2 parameters is_v2 = int(req.params.get('list-type', '1')) == 2 fetch_owner = False if is_v2: log_s3api_command(req, 'list-objects-v2') if 'start-after' in req.params: query.update({'marker': req.params['start-after']}) # continuation-token overrides start-after if 'continuation-token' in req.params: decoded = b64decode(req.params['continuation-token']) query.update({'marker': decoded}) if 'fetch-owner' in req.params: fetch_owner = config_true_value(req.params['fetch-owner']) else: log_s3api_command(req, 'list-objects') resp = req.get_response(self.app, query=query) objects = json.loads(resp.body) if 'versions' in req.params: req.container_name += VERSIONING_SUFFIX query['reverse'] = 'true' try: resp = req.get_response(self.app, query=query) versioned_objects = json.loads(resp.body) prefixes = set() for o in versioned_objects: if 'name' in o: # The name looks like this: # '%03x%s/%s' % (len(name), name, version) o['name'], o['version_id'] = \ o['name'][3:].rsplit('/', 1) else: prefixes.add(o['subdir']) # suppress duplicated prefixes for o in list(objects): if 'subdir' in o and o['subdir'] in prefixes: objects.remove(o) objects.extend(versioned_objects) except NoSuchBucket: # the bucket may not be versioned pass req.container_name = req.container_name[:-len(VERSIONING_SUFFIX)] objects.sort(key=lambda o: o.get('name') or o.get('subdir')) for o in objects: if 'subdir' not in o and not o.get('version_id'): info = req.get_object_info( self.app, object_name=o['name'].encode('utf-8')) o['sysmeta_version_id'] = info.get('sysmeta', {}).get( 'version-id', 'null') if 'versions' in req.params: elem = Element('ListVersionsResult') else: elem = Element('ListBucketResult') if encoding_type is not None: elem.encoding_type = encoding_type SubElement(elem, 'Name').text = req.container_name SubElement(elem, 'Prefix').text = req.params.get('prefix') # Filter objects according to version-id-marker and key-marker v_marker = req.params.get('version-id-marker') k_marker = req.params.get('key-marker') k_marker_matched = not bool(k_marker) if 'versions' in req.params and (v_marker or k_marker): to_delete = [] for i, o in enumerate(objects): if 'subdir' not in o: version_id = o.get('version_id', o.get('sysmeta_version_id', 'null')) if not k_marker_matched and k_marker != o['name']: to_delete.append(i) if k_marker == o['name']: k_marker_matched = True if k_marker == o['name'] and v_marker: if v_marker == version_id: v_marker = None to_delete.append(i) for i in reversed(to_delete): objects.pop(i) # in order to judge that truncated is valid, check whether # max_keys + 1 th element exists in swift. 
is_truncated = max_keys > 0 and len(objects) > max_keys objects = objects[:max_keys] if not is_v2: if 'versions' in req.params: SubElement(elem, 'KeyMarker').text = req.params.get( 'key-marker') SubElement(elem, 'VersionIdMarker').text = req.params.get( 'version-id-marker') else: SubElement(elem, 'Marker').text = req.params.get('marker') if is_truncated and 'delimiter' in req.params: if 'name' in objects[-1]: SubElement(elem, 'NextMarker').text = \ objects[-1]['name'] if 'subdir' in objects[-1]: SubElement(elem, 'NextMarker').text = \ objects[-1]['subdir'] else: if is_truncated: if 'name' in objects[-1]: SubElement(elem, 'NextContinuationToken').text = \ b64encode(objects[-1]['name'].encode('utf8')) if 'subdir' in objects[-1]: SubElement(elem, 'NextContinuationToken').text = \ b64encode(objects[-1]['subdir'].encode('utf8')) if 'continuation-token' in req.params: SubElement(elem, 'ContinuationToken').text = \ req.params['continuation-token'] if 'start-after' in req.params: SubElement(elem, 'StartAfter').text = \ req.params['start-after'] SubElement(elem, 'KeyCount').text = str(len(objects)) SubElement(elem, 'MaxKeys').text = str(tag_max_keys) if 'delimiter' in req.params: SubElement(elem, 'Delimiter').text = req.params['delimiter'] if encoding_type is not None: SubElement(elem, 'EncodingType').text = encoding_type SubElement(elem, 'IsTruncated').text = \ 'true' if is_truncated else 'false' for o in objects: if 'subdir' not in o: if 'versions' in req.params: version_id = o.get('version_id', o.get('sysmeta_version_id', 'null')) if o.get('content_type') == DELETE_MARKER_CONTENT_TYPE: contents = SubElement(elem, 'DeleteMarker') else: contents = SubElement(elem, 'Version') SubElement(contents, 'Key').text = \ o['name'].encode('utf-8') SubElement(contents, 'VersionId').text = version_id SubElement(contents, 'IsLatest').text = str( 'version_id' not in o).lower() else: contents = SubElement(elem, 'Contents') SubElement(contents, 'Key').text = \ o['name'].encode('utf-8') SubElement(contents, 'LastModified').text = \ o['last_modified'][:-3] + 'Z' if contents.tag != 'DeleteMarker': if 's3_etag' in o: # New-enough MUs are already in the right format etag = o['s3_etag'] elif 'slo_etag' in o: # SLOs may be in something *close* to the MU format etag = '"%s-N"' % o['slo_etag'].strip('"') else: etag = o['hash'] if len(etag) < 2 or etag[::len(etag) - 1] != '""': # Normal objects just use the MD5 etag = '"%s"' % o['hash'] # This also catches sufficiently-old SLOs, # but we have no way to identify those # from container listings # Otherwise, somebody somewhere (proxyfs, maybe?) # made this look like an RFC-compliant ETag; # we don't need to quote-wrap. SubElement(contents, 'ETag').text = etag SubElement(contents, 'Size').text = str(o['bytes']) if fetch_owner or not is_v2: owner = SubElement(contents, 'Owner') SubElement(owner, 'ID').text = req.user_id SubElement(owner, 'DisplayName').text = req.user_id if contents.tag != 'DeleteMarker': SubElement(contents, 'StorageClass').text = 'STANDARD' for o in objects: if 'subdir' in o: common_prefixes = SubElement(elem, 'CommonPrefixes') SubElement(common_prefixes, 'Prefix').text = \ o['subdir'].encode('utf-8') body = tostring(elem, encoding_type=encoding_type) resp = HTTPOk(body=body, content_type='application/xml') origin = req.headers.get('Origin') if origin: rule = get_cors(self.app, req, "GET", origin) if rule: cors_fill_headers(req, resp, rule) return resp
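# Editor's sketch: the ETag selection buried in the listing loop above, pulled
# out as a hypothetical helper (listing_etag is not a real function in the
# middleware). The order is: multipart 's3_etag', then 'slo_etag', then the
# plain container-listing hash, quote-wrapping only when the value is not
# already quoted.
def listing_etag(o):
    if 's3_etag' in o:
        # New-enough multipart uploads already carry the S3-style ETag
        return o['s3_etag']
    if 'slo_etag' in o:
        # SLOs are close to the multipart format; normalize the quoting
        return '"%s-N"' % o['slo_etag'].strip('"')
    etag = o['hash']
    if len(etag) < 2 or etag[::len(etag) - 1] != '""':
        # Plain objects: quote-wrap the MD5 from the container listing
        etag = '"%s"' % o['hash']
    return etag

assert listing_etag({'hash': 'd41d8cd98f00b204e9800998ecf8427e'}) == \
    '"d41d8cd98f00b204e9800998ecf8427e"'
assert listing_etag({'slo_etag': '"abc"', 'hash': 'ignored'}) == '"abc-N"'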
def POST(self, req):
    """
    Handles Delete Multiple Objects.
    """
    def object_key_iter(elem):
        for obj in elem.iterchildren('Object'):
            key = obj.find('./Key').text
            if not key:
                raise UserKeyMustBeSpecified()
            version = obj.find('./VersionId')
            if version is not None:
                version = version.text

            yield key, version

    try:
        xml = req.xml(MAX_MULTI_DELETE_BODY_SIZE, check_md5=True)
        elem = fromstring(xml, 'Delete')

        quiet = elem.find('./Quiet')
        if quiet is not None and quiet.text.lower() == 'true':
            self.quiet = True
        else:
            self.quiet = False

        delete_list = list(object_key_iter(elem))
        if len(delete_list) > CONF.max_multi_delete_objects:
            raise MalformedXML()
    except (XMLSyntaxError, DocumentInvalid):
        raise MalformedXML()
    except ErrorResponse:
        raise
    except Exception as e:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        LOGGER.error(e)
        raise exc_type, exc_value, exc_traceback

    elem = Element('DeleteResult')

    # check bucket existence
    try:
        req.get_response(self.app, 'HEAD')
    except AccessDenied as error:
        body = self._gen_error_body(error, elem, delete_list)
        return HTTPOk(body=body)

    for key, version in delete_list:
        if version is not None:
            # TODO: delete the specific version of the object
            raise S3NotImplemented()

        req.object_name = key

        try:
            query = req.gen_multipart_manifest_delete_query(self.app)
            req.get_response(self.app, method='DELETE', query=query)
        except NoSuchKey:
            pass
        except ErrorResponse as e:
            error = SubElement(elem, 'Error')
            SubElement(error, 'Key').text = key
            SubElement(error, 'Code').text = e.__class__.__name__
            SubElement(error, 'Message').text = e._msg
            continue

        if not self.quiet:
            deleted = SubElement(elem, 'Deleted')
            SubElement(deleted, 'Key').text = key

    body = tostring(elem)

    return HTTPOk(body=body)
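# Editor's sketch (sample data, not the middleware's code): the Multi-Object
# Delete body that object_key_iter() above walks, parsed here with plain
# lxml.etree; the middleware's own fromstring() additionally validates the
# document against the 'Delete' schema.
from lxml import etree

SAMPLE_DELETE_BODY = b'''<Delete>
  <Quiet>true</Quiet>
  <Object><Key>Key1</Key></Object>
  <Object><Key>Key2</Key><VersionId>null</VersionId></Object>
</Delete>'''

delete_elem = etree.fromstring(SAMPLE_DELETE_BODY)
for obj in delete_elem.iterchildren('Object'):
    key = obj.find('./Key').text
    version = obj.find('./VersionId')
    version = version.text if version is not None else None
    print(key, version)
# Key1 None
# Key2 null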
def GET(self, req): """ Handle GET Bucket (List Objects) request """ max_keys = req.get_validated_param('max-keys', CONF.max_bucket_listing) # TODO: Separate max_bucket_listing and default_bucket_listing tag_max_keys = max_keys max_keys = min(max_keys, CONF.max_bucket_listing) encoding_type = req.params.get('encoding-type') if encoding_type is not None and encoding_type != 'url': err_msg = 'Invalid Encoding Method specified in Request' raise InvalidArgument('encoding-type', encoding_type, err_msg) query = { 'format': 'json', 'limit': max_keys + 1, } if 'marker' in req.params: query.update({'marker': req.params['marker']}) if 'prefix' in req.params: query.update({'prefix': req.params['prefix']}) if 'delimiter' in req.params: query.update({'delimiter': req.params['delimiter']}) resp = req.get_response(self.app, query=query) objects = json.loads(resp.body) elem = Element('ListBucketResult') SubElement(elem, 'Name').text = req.container_name SubElement(elem, 'Prefix').text = req.params.get('prefix') SubElement(elem, 'Marker').text = req.params.get('marker') # in order to judge that truncated is valid, check whether # max_keys + 1 th element exists in swift. is_truncated = max_keys > 0 and len(objects) > max_keys objects = objects[:max_keys] if is_truncated and 'delimiter' in req.params: if 'name' in objects[-1]: SubElement(elem, 'NextMarker').text = \ objects[-1]['name'] if 'subdir' in objects[-1]: SubElement(elem, 'NextMarker').text = \ objects[-1]['subdir'] SubElement(elem, 'MaxKeys').text = str(tag_max_keys) if 'delimiter' in req.params: SubElement(elem, 'Delimiter').text = req.params['delimiter'] if encoding_type is not None: SubElement(elem, 'EncodingType').text = encoding_type SubElement(elem, 'IsTruncated').text = \ 'true' if is_truncated else 'false' for o in objects: if 'subdir' not in o: contents = SubElement(elem, 'Contents') SubElement(contents, 'Key').text = o['name'] SubElement(contents, 'LastModified').text = \ o['last_modified'][:-3] + 'Z' SubElement(contents, 'ETag').text = o['hash'] SubElement(contents, 'Size').text = str(o['bytes']) owner = SubElement(contents, 'Owner') SubElement(owner, 'ID').text = req.user_id SubElement(owner, 'DisplayName').text = req.user_id SubElement(contents, 'StorageClass').text = 'STANDARD' for o in objects: if 'subdir' in o: common_prefixes = SubElement(elem, 'CommonPrefixes') SubElement(common_prefixes, 'Prefix').text = o['subdir'] body = tostring(elem, encoding_type=encoding_type) return HTTPOk(body=body, content_type='application/xml')
def GET(self, req): """ Handle GET Bucket (List Objects) request """ max_keys = req.get_validated_param('max-keys', CONF.max_bucket_listing) # TODO: Separate max_bucket_listing and default_bucket_listing tag_max_keys = max_keys max_keys = min(max_keys, CONF.max_bucket_listing) encoding_type = req.params.get('encoding-type') if encoding_type is not None and encoding_type != 'url': err_msg = 'Invalid Encoding Method specified in Request' raise InvalidArgument('encoding-type', encoding_type, err_msg) query = { 'format': 'json', 'limit': max_keys + 1, } if 'marker' in req.params: query.update({'marker': req.params['marker']}) if 'prefix' in req.params: query.update({'prefix': req.params['prefix']}) if 'delimiter' in req.params: query.update({'delimiter': req.params['delimiter']}) # GET Bucket (List Objects) Version 2 parameters is_v2 = int(req.params.get('list-type', '1')) == 2 fetch_owner = False if is_v2: if 'start-after' in req.params: query.update({'marker': req.params['start-after']}) # continuation-token overrides start-after if 'continuation-token' in req.params: decoded = b64decode(req.params['continuation-token']) query.update({'marker': decoded}) if 'fetch-owner' in req.params: fetch_owner = config_true_value(req.params['fetch-owner']) resp = req.get_response(self.app, query=query) objects = json.loads(resp.body) elem = Element('ListBucketResult') SubElement(elem, 'Name').text = req.container_name SubElement(elem, 'Prefix').text = req.params.get('prefix') # in order to judge that truncated is valid, check whether # max_keys + 1 th element exists in swift. is_truncated = max_keys > 0 and len(objects) > max_keys objects = objects[:max_keys] if not is_v2: SubElement(elem, 'Marker').text = req.params.get('marker') if is_truncated and 'delimiter' in req.params: if 'name' in objects[-1]: SubElement(elem, 'NextMarker').text = \ objects[-1]['name'] if 'subdir' in objects[-1]: SubElement(elem, 'NextMarker').text = \ objects[-1]['subdir'] else: if is_truncated: if 'name' in objects[-1]: SubElement(elem, 'NextContinuationToken').text = \ b64encode(objects[-1]['name']) if 'subdir' in objects[-1]: SubElement(elem, 'NextContinuationToken').text = \ b64encode(objects[-1]['subdir']) if 'continuation-token' in req.params: SubElement(elem, 'ContinuationToken').text = \ req.params['continuation-token'] if 'start-after' in req.params: SubElement(elem, 'StartAfter').text = \ req.params['start-after'] SubElement(elem, 'KeyCount').text = str(len(objects)) SubElement(elem, 'MaxKeys').text = str(tag_max_keys) if 'delimiter' in req.params: SubElement(elem, 'Delimiter').text = req.params['delimiter'] if encoding_type is not None: SubElement(elem, 'EncodingType').text = encoding_type SubElement(elem, 'IsTruncated').text = \ 'true' if is_truncated else 'false' for o in objects: if 'subdir' not in o: contents = SubElement(elem, 'Contents') SubElement(contents, 'Key').text = o['name'] SubElement(contents, 'LastModified').text = \ o['last_modified'][:-3] + 'Z' if 's3_etag' in o.get('content_type', ''): _, o['hash'] = extract_s3_etag(o['content_type']) SubElement(contents, 'ETag').text = '"%s"' % o['hash'] SubElement(contents, 'Size').text = str(o['bytes']) if fetch_owner or not is_v2: owner = SubElement(contents, 'Owner') SubElement(owner, 'ID').text = req.user_id SubElement(owner, 'DisplayName').text = req.user_id SubElement(contents, 'StorageClass').text = 'STANDARD' for o in objects: if 'subdir' in o: common_prefixes = SubElement(elem, 'CommonPrefixes') SubElement(common_prefixes, 'Prefix').text = o['subdir'] 
body = tostring(elem, encoding_type=encoding_type) return HTTPOk(body=body, content_type='application/xml')
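# Editor's sketch of the list-objects-v2 continuation-token round trip used
# above: NextContinuationToken is simply the base64-encoded name of the last
# key returned, and an incoming continuation-token is decoded back into a
# plain Swift listing marker. Python 3 bytes handling is shown here; the
# handler itself passes str values straight through.
from base64 import b64decode, b64encode

last_key = 'photos/2016/example.jpg'
token = b64encode(last_key.encode('utf8')).decode('ascii')
marker = b64decode(token).decode('utf8')
assert marker == last_key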
def GET(self, req): """ Handle GET Bucket (List Objects) request """ max_keys = req.get_validated_param('max-keys', CONF.max_bucket_listing) # TODO: Separate max_bucket_listing and default_bucket_listing tag_max_keys = max_keys max_keys = min(max_keys, CONF.max_bucket_listing) encoding_type = req.params.get('encoding-type') if encoding_type is not None and encoding_type != 'url': err_msg = 'Invalid Encoding Method specified in Request' raise InvalidArgument('encoding-type', encoding_type, err_msg) query = { 'format': 'json', 'limit': max_keys + 1, } if 'marker' in req.params: query.update({'marker': req.params['marker']}) if 'prefix' in req.params: query.update({'prefix': req.params['prefix']}) if 'delimiter' in req.params: query.update({'delimiter': req.params['delimiter']}) # GET Bucket (List Objects) Version 2 parameters is_v2 = int(req.params.get('list-type', '1')) == 2 fetch_owner = False if is_v2: if 'start-after' in req.params: query.update({'marker': req.params['start-after']}) # continuation-token overrides start-after if 'continuation-token' in req.params: decoded = b64decode(req.params['continuation-token']) query.update({'marker': decoded}) if 'fetch-owner' in req.params: fetch_owner = config_true_value(req.params['fetch-owner']) resp = req.get_response(self.app, query=query) objects = json.loads(resp.body) if 'versions' in req.params: req.container_name += VERSIONING_SUFFIX query['reverse'] = 'true' try: resp = req.get_response(self.app, query=query) versioned_objects = json.loads(resp.body) for o in versioned_objects: # The name looks like this: # '%03x%s/%s' % (len(name), name, version) o['name'], o['version_id'] = o['name'][3:].split('/', 1) objects.extend(versioned_objects) except NoSuchBucket: # the bucket may not be versioned pass req.container_name = req.container_name[:-len(VERSIONING_SUFFIX)] objects.sort(key=lambda o: o['name']) for o in objects: if not o.get('version_id'): info = req.get_object_info( self.app, object_name=o['name']) o['sysmeta_version_id'] = info.get('sysmeta', {}).get( 'version-id', 'null') if 'versions' in req.params: elem = Element('ListVersionsResult') else: elem = Element('ListBucketResult') SubElement(elem, 'Name').text = req.container_name SubElement(elem, 'Prefix').text = req.params.get('prefix') # Filter objects according to version-id-marker and key-marker v_marker = req.params.get('version-id-marker') k_marker = req.params.get('key-marker') k_marker_matched = not bool(k_marker) if 'versions' in req.params and (v_marker or k_marker): to_delete = [] for i, o in enumerate(objects): if 'subdir' not in o: version_id = o.get('version_id', o.get('sysmeta_version_id', 'null')) if not k_marker_matched and k_marker != o['name']: to_delete.append(i) if k_marker == o['name']: k_marker_matched = True if k_marker == o['name'] and v_marker: if v_marker == version_id: v_marker = None to_delete.append(i) for i in reversed(to_delete): objects.pop(i) # in order to judge that truncated is valid, check whether # max_keys + 1 th element exists in swift. 
is_truncated = max_keys > 0 and len(objects) > max_keys objects = objects[:max_keys] if not is_v2: if 'versions' in req.params: SubElement(elem, 'KeyMarker').text = req.params.get( 'key-marker') SubElement(elem, 'VersionIdMarker').text = req.params.get( 'version-id-marker') else: SubElement(elem, 'Marker').text = req.params.get('marker') if is_truncated and 'delimiter' in req.params: if 'name' in objects[-1]: SubElement(elem, 'NextMarker').text = \ objects[-1]['name'] if 'subdir' in objects[-1]: SubElement(elem, 'NextMarker').text = \ objects[-1]['subdir'] else: if is_truncated: if 'name' in objects[-1]: SubElement(elem, 'NextContinuationToken').text = \ b64encode(objects[-1]['name']) if 'subdir' in objects[-1]: SubElement(elem, 'NextContinuationToken').text = \ b64encode(objects[-1]['subdir']) if 'continuation-token' in req.params: SubElement(elem, 'ContinuationToken').text = \ req.params['continuation-token'] if 'start-after' in req.params: SubElement(elem, 'StartAfter').text = \ req.params['start-after'] SubElement(elem, 'KeyCount').text = str(len(objects)) SubElement(elem, 'MaxKeys').text = str(tag_max_keys) if 'delimiter' in req.params: SubElement(elem, 'Delimiter').text = req.params['delimiter'] if encoding_type is not None: SubElement(elem, 'EncodingType').text = encoding_type SubElement(elem, 'IsTruncated').text = \ 'true' if is_truncated else 'false' for o in objects: if 'subdir' not in o: if 'versions' in req.params: version_id = o.get('version_id', o.get('sysmeta_version_id', 'null')) if o.get('content_type') == DELETE_MARKER_CONTENT_TYPE: contents = SubElement(elem, 'DeleteMarker') else: contents = SubElement(elem, 'Version') SubElement(contents, 'Key').text = o['name'] SubElement(contents, 'VersionId').text = version_id SubElement(contents, 'IsLatest').text = str( 'version_id' not in o).lower() else: contents = SubElement(elem, 'Contents') SubElement(contents, 'Key').text = o['name'] SubElement(contents, 'LastModified').text = \ o['last_modified'][:-3] + 'Z' if 's3_etag' in o.get('content_type', ''): _, o['hash'] = extract_s3_etag(o['content_type']) if contents.tag != 'DeleteMarker': SubElement(contents, 'ETag').text = '"%s"' % o['hash'] SubElement(contents, 'Size').text = str(o['bytes']) if fetch_owner or not is_v2: owner = SubElement(contents, 'Owner') SubElement(owner, 'ID').text = req.user_id SubElement(owner, 'DisplayName').text = req.user_id if contents.tag != 'DeleteMarker': SubElement(contents, 'StorageClass').text = 'STANDARD' for o in objects: if 'subdir' in o: common_prefixes = SubElement(elem, 'CommonPrefixes') SubElement(common_prefixes, 'Prefix').text = o['subdir'] body = tostring(elem, encoding_type=encoding_type) return HTTPOk(body=body, content_type='application/xml')
def elem(self):
    elem = Element('Grantee', nsmap={'xsi': XMLNS_XSI})
    elem.set('{%s}type' % XMLNS_XSI, self.type)
    SubElement(elem, 'ID').text = self.id
    SubElement(elem, 'DisplayName').text = self.display_name
    return elem
def _gen_location_xml(self, location):
    elem = Element('CreateBucketConfiguration')
    SubElement(elem, 'LocationConstraint').text = location
    return tostring(elem)
def GET(self, req): """ Handles List Multipart Uploads """ def separate_uploads(uploads, prefix, delimiter): """ separate_uploads will separate uploads into non_delimited_uploads (a subset of uploads) and common_prefixes according to the specified delimiter. non_delimited_uploads is a list of uploads which exclude the delimiter. common_prefixes is a set of prefixes prior to the specified delimiter. Note that the prefix in the common_prefixes includes the delimiter itself. i.e. if '/' delimiter specified and then the uploads is consists of ['foo', 'foo/bar'], this function will return (['foo'], ['foo/']). :param uploads: A list of uploads dictionary :param prefix: A string of prefix reserved on the upload path. (i.e. the delimiter must be searched behind the prefix) :param delimiter: A string of delimiter to split the path in each upload :return (non_delimited_uploads, common_prefixes) """ (prefix, delimiter) = \ utf8encode(prefix, delimiter) non_delimited_uploads = [] common_prefixes = set() for upload in uploads: key = upload['key'] end = key.find(delimiter, len(prefix)) if end >= 0: common_prefix = key[:end + len(delimiter)] common_prefixes.add(common_prefix) else: non_delimited_uploads.append(upload) return non_delimited_uploads, sorted(common_prefixes) encoding_type = req.params.get('encoding-type') if encoding_type is not None and encoding_type != 'url': err_msg = 'Invalid Encoding Method specified in Request' raise InvalidArgument('encoding-type', encoding_type, err_msg) keymarker = req.params.get('key-marker', '') uploadid = req.params.get('upload-id-marker', '') maxuploads = req.get_validated_param( 'max-uploads', DEFAULT_MAX_UPLOADS, DEFAULT_MAX_UPLOADS) query = { 'format': 'json', 'limit': maxuploads + 1, } if uploadid and keymarker: query.update({'marker': '%s/%s' % (keymarker, uploadid)}) elif keymarker: query.update({'marker': '%s/~' % (keymarker)}) if 'prefix' in req.params: query.update({'prefix': req.params['prefix']}) container = req.container_name + MULTIUPLOAD_SUFFIX try: resp = req.get_response(self.app, container=container, query=query) objects = json.loads(resp.body) except NoSuchBucket: # Assume NoSuchBucket as no uploads objects = [] def object_to_upload(object_info): obj, upid = object_info['name'].rsplit('/', 1) obj_dict = {'key': obj, 'upload_id': upid, 'last_modified': object_info['last_modified']} return obj_dict # uploads is a list consists of dict, {key, upload_id, last_modified} # Note that pattern matcher will drop whole segments objects like as # object_name/upload_id/1. 
pattern = re.compile('/[0-9]+$') uploads = [object_to_upload(obj) for obj in objects if pattern.search(obj.get('name', '')) is None] prefixes = [] if 'delimiter' in req.params: prefix = req.params.get('prefix', '') delimiter = req.params['delimiter'] uploads, prefixes = \ separate_uploads(uploads, prefix, delimiter) if len(uploads) > maxuploads: uploads = uploads[:maxuploads] truncated = True else: truncated = False nextkeymarker = '' nextuploadmarker = '' if len(uploads) > 1: nextuploadmarker = uploads[-1]['upload_id'] nextkeymarker = uploads[-1]['key'] result_elem = Element('ListMultipartUploadsResult') SubElement(result_elem, 'Bucket').text = req.container_name SubElement(result_elem, 'KeyMarker').text = keymarker SubElement(result_elem, 'UploadIdMarker').text = uploadid SubElement(result_elem, 'NextKeyMarker').text = nextkeymarker SubElement(result_elem, 'NextUploadIdMarker').text = nextuploadmarker if 'delimiter' in req.params: SubElement(result_elem, 'Delimiter').text = \ req.params['delimiter'] if 'prefix' in req.params: SubElement(result_elem, 'Prefix').text = req.params['prefix'] SubElement(result_elem, 'MaxUploads').text = str(maxuploads) if encoding_type is not None: SubElement(result_elem, 'EncodingType').text = encoding_type SubElement(result_elem, 'IsTruncated').text = \ 'true' if truncated else 'false' # TODO: don't show uploads which are initiated before this bucket is # created. for u in uploads: upload_elem = SubElement(result_elem, 'Upload') SubElement(upload_elem, 'Key').text = u['key'] SubElement(upload_elem, 'UploadId').text = u['upload_id'] initiator_elem = SubElement(upload_elem, 'Initiator') SubElement(initiator_elem, 'ID').text = req.user_id SubElement(initiator_elem, 'DisplayName').text = req.user_id owner_elem = SubElement(upload_elem, 'Owner') SubElement(owner_elem, 'ID').text = req.user_id SubElement(owner_elem, 'DisplayName').text = req.user_id SubElement(upload_elem, 'StorageClass').text = 'STANDARD' SubElement(upload_elem, 'Initiated').text = \ u['last_modified'][:-3] + 'Z' for p in prefixes: elem = SubElement(result_elem, 'CommonPrefixes') SubElement(elem, 'Prefix').text = p body = tostring(result_elem, encoding_type=encoding_type) return HTTPOk(body=body, content_type='application/xml')
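# Editor's sketch: a standalone version of the delimiter split performed by
# separate_uploads() above, reproducing the ['foo', 'foo/bar'] example from
# its docstring. split_uploads is a hypothetical name, and the middleware's
# utf8encode step is omitted.
def split_uploads(uploads, prefix, delimiter):
    non_delimited, common_prefixes = [], set()
    for upload in uploads:
        key = upload['key']
        end = key.find(delimiter, len(prefix))
        if end >= 0:
            # Collapse anything past the delimiter into a common prefix
            common_prefixes.add(key[:end + len(delimiter)])
        else:
            non_delimited.append(upload)
    return non_delimited, sorted(common_prefixes)

print(split_uploads([{'key': 'foo'}, {'key': 'foo/bar'}], '', '/'))
# ([{'key': 'foo'}], ['foo/'])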