Code example #1
    def GET(self, req):
        """
        Handles List Multipart Uploads
        """

        log_s3api_command(req, 'list-multipart-uploads')

        def separate_uploads(uploads, prefix, delimiter):
            """
            separate_uploads splits uploads into non_delimited_uploads
            (a subset of uploads) and common_prefixes according to the
            specified delimiter. non_delimited_uploads is a list of uploads
            whose keys do not contain the delimiter after the prefix.
            common_prefixes is a set of prefixes up to and including the
            specified delimiter.

            i.e. if the '/' delimiter is specified and the uploads consist of
            ['foo', 'foo/bar'], this function will return (['foo'], ['foo/']).

            :param uploads: a list of upload dictionaries
            :param prefix: the prefix reserved on the upload path
                           (i.e. the delimiter is only searched for after
                            the prefix)
            :param delimiter: the delimiter used to split the key of each
                              upload

            :return (non_delimited_uploads, common_prefixes)
            """
            (prefix, delimiter) = \
                utf8encode(prefix, delimiter)
            non_delimited_uploads = []
            common_prefixes = set()
            for upload in uploads:
                key = upload['key']
                end = key.find(delimiter, len(prefix))
                if end >= 0:
                    common_prefix = key[:end + len(delimiter)]
                    common_prefixes.add(common_prefix)
                else:
                    non_delimited_uploads.append(upload)
            return non_delimited_uploads, sorted(common_prefixes)

        encoding_type = req.params.get('encoding-type')
        if encoding_type is not None and encoding_type != 'url':
            err_msg = 'Invalid Encoding Method specified in Request'
            raise InvalidArgument('encoding-type', encoding_type, err_msg)

        keymarker = req.params.get('key-marker', '')
        uploadid = req.params.get('upload-id-marker', '')
        maxuploads = req.get_validated_param('max-uploads',
                                             DEFAULT_MAX_UPLOADS,
                                             DEFAULT_MAX_UPLOADS)

        query = {
            'format': 'json',
            'limit': maxuploads + 1,
        }

        if uploadid and keymarker:
            query.update({'marker': '%s/%s' % (keymarker, uploadid)})
        elif keymarker:
            query.update({'marker': '%s/~' % (keymarker)})
        if 'prefix' in req.params:
            query.update({'prefix': req.params['prefix']})

        container = req.container_name + MULTIUPLOAD_SUFFIX
        try:
            req.environ['oio.list_mpu'] = True
            resp = req.get_response(self.app, container=container, query=query)
            objects = json.loads(resp.body)
        except NoSuchBucket:
            # Treat NoSuchBucket as meaning there are no uploads
            objects = []

        def object_to_upload(object_info):
            obj, upid = object_info['name'].rsplit('/', 1)
            obj_dict = {
                'key': obj,
                'upload_id': upid,
                'last_modified': object_info['last_modified']
            }
            return obj_dict

        # uploads is a list of dicts of the form {key, upload_id,
        # last_modified}. Note that the pattern matcher drops the individual
        # segment objects such as object_name/upload_id/1.
        pattern = re.compile('/[0-9]+$')
        uploads = [
            object_to_upload(obj) for obj in objects
            if pattern.search(obj.get('name', '')) is None
        ]

        prefixes = []
        if 'delimiter' in req.params:
            prefix = req.params.get('prefix', '')
            delimiter = req.params['delimiter']
            uploads, prefixes = \
                separate_uploads(uploads, prefix, delimiter)

        if len(uploads) > maxuploads:
            uploads = uploads[:maxuploads]
            truncated = True
        else:
            truncated = False

        nextkeymarker = ''
        nextuploadmarker = ''
        if len(uploads) > 1:
            nextuploadmarker = uploads[-1]['upload_id']
            nextkeymarker = uploads[-1]['key']

        result_elem = Element('ListMultipartUploadsResult')
        if encoding_type is not None:
            result_elem.encoding_type = encoding_type
        SubElement(result_elem, 'Bucket').text = req.container_name
        SubElement(result_elem, 'KeyMarker').text = keymarker
        SubElement(result_elem, 'UploadIdMarker').text = uploadid
        SubElement(result_elem, 'NextKeyMarker').text = nextkeymarker
        SubElement(result_elem, 'NextUploadIdMarker').text = nextuploadmarker
        if 'delimiter' in req.params:
            SubElement(result_elem, 'Delimiter').text = \
                req.params['delimiter']
        if 'prefix' in req.params:
            SubElement(result_elem, 'Prefix').text = req.params['prefix']
        SubElement(result_elem, 'MaxUploads').text = str(maxuploads)
        if encoding_type is not None:
            SubElement(result_elem, 'EncodingType').text = encoding_type
        SubElement(result_elem, 'IsTruncated').text = \
            'true' if truncated else 'false'

        # TODO: don't show uploads that were initiated before this bucket was
        # created.
        for u in uploads:
            upload_elem = SubElement(result_elem, 'Upload')
            SubElement(upload_elem, 'Key').text = u['key']
            SubElement(upload_elem, 'UploadId').text = u['upload_id']
            initiator_elem = SubElement(upload_elem, 'Initiator')
            SubElement(initiator_elem, 'ID').text = req.user_id
            SubElement(initiator_elem, 'DisplayName').text = req.user_id
            owner_elem = SubElement(upload_elem, 'Owner')
            SubElement(owner_elem, 'ID').text = req.user_id
            SubElement(owner_elem, 'DisplayName').text = req.user_id
            SubElement(upload_elem, 'StorageClass').text = 'STANDARD'
            SubElement(upload_elem, 'Initiated').text = \
                u['last_modified'][:-3] + 'Z'

        for p in prefixes:
            elem = SubElement(result_elem, 'CommonPrefixes')
            SubElement(elem, 'Prefix').text = p

        body = tostring(result_elem, encoding_type=encoding_type)

        return HTTPOk(body=body, content_type='application/xml')
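
The snippet below is a minimal, standalone sketch (with made-up object names) of the listing transformation performed above: part objects whose names end in '/<number>' are dropped by the regex, the remaining '<key>/<upload_id>' markers are turned into upload dicts, and a delimiter groups them into common prefixes exactly as described in the separate_uploads docstring.

# Standalone sketch of the filtering and grouping above; the container
# listing entries are hypothetical.
import re

listing = [
    {'name': 'foo/upload1', 'last_modified': '2024-01-01T00:00:00.000000'},
    {'name': 'foo/upload1/1', 'last_modified': '2024-01-01T00:00:01.000000'},
    {'name': 'foo/bar/upload2', 'last_modified': '2024-01-02T00:00:00.000000'},
]

pattern = re.compile('/[0-9]+$')
uploads = []
for entry in listing:
    if pattern.search(entry['name']):
        continue                              # skip individual part objects
    key, upload_id = entry['name'].rsplit('/', 1)
    uploads.append({'key': key, 'upload_id': upload_id,
                    'last_modified': entry['last_modified']})

# Group by the '/' delimiter the same way separate_uploads() does.
non_delimited, common_prefixes = [], set()
for u in uploads:
    end = u['key'].find('/')
    if end >= 0:
        common_prefixes.add(u['key'][:end + 1])
    else:
        non_delimited.append(u)

print([u['key'] for u in non_delimited], sorted(common_prefixes))
# -> ['foo'] ['foo/']
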
Code example #2
    def GET(self, req):
        """
        Handles List Parts.
        """
        log_s3api_command(req, 'list-parts')

        def filter_part_num_marker(o):
            try:
                num = int(os.path.basename(o['name']))
                return num > part_num_marker
            except ValueError:
                return False

        encoding_type = req.params.get('encoding-type')
        if encoding_type is not None and encoding_type != 'url':
            err_msg = 'Invalid Encoding Method specified in Request'
            raise InvalidArgument('encoding-type', encoding_type, err_msg)

        upload_id = req.params['uploadId']
        _check_upload_info(req, self.app, upload_id)

        maxparts = req.get_validated_param('max-parts',
                                           DEFAULT_MAX_PARTS_LISTING,
                                           CONF.max_parts_listing)
        part_num_marker = req.get_validated_param('part-number-marker', 0)

        query = {
            'format': 'json',
            'limit': maxparts + 1,
            'prefix': '%s/%s/' % (req.object_name, upload_id),
            'delimiter': '/'
        }

        container = req.container_name + MULTIUPLOAD_SUFFIX
        resp = req.get_response(self.app,
                                container=container,
                                obj='',
                                query=query)
        objects = json.loads(resp.body)

        last_part = 0

        # If the caller requested a list starting at a specific part number,
        # construct a subset of the object list.
        objList = list(filter(filter_part_num_marker, objects))

        # pylint: disable-msg=E1103
        objList.sort(key=lambda o: int(o['name'].split('/')[-1]))

        if len(objList) > maxparts:
            objList = objList[:maxparts]
            truncated = True
        else:
            truncated = False
        # TODO: We have to retrieve the object list again when truncated is
        # True and some objects were filtered out because of invalid names,
        # since there may not be enough objects left to reach the maxparts
        # limit.

        if objList:
            o = objList[-1]
            last_part = os.path.basename(o['name'])

        result_elem = Element('ListPartsResult')
        if encoding_type is not None:
            result_elem.encoding_type = encoding_type
        SubElement(result_elem, 'Bucket').text = req.container_name
        SubElement(result_elem, 'Key').text = req.object_name
        SubElement(result_elem, 'UploadId').text = upload_id

        initiator_elem = SubElement(result_elem, 'Initiator')
        SubElement(initiator_elem, 'ID').text = req.user_id
        SubElement(initiator_elem, 'DisplayName').text = req.user_id
        owner_elem = SubElement(result_elem, 'Owner')
        SubElement(owner_elem, 'ID').text = req.user_id
        SubElement(owner_elem, 'DisplayName').text = req.user_id

        SubElement(result_elem, 'StorageClass').text = 'STANDARD'
        SubElement(result_elem, 'PartNumberMarker').text = str(part_num_marker)
        SubElement(result_elem, 'NextPartNumberMarker').text = str(last_part)
        SubElement(result_elem, 'MaxParts').text = str(maxparts)
        if 'encoding-type' in req.params:
            SubElement(result_elem, 'EncodingType').text = \
                req.params['encoding-type']
        SubElement(result_elem, 'IsTruncated').text = \
            'true' if truncated else 'false'

        for i in objList:
            part_elem = SubElement(result_elem, 'Part')
            SubElement(part_elem, 'PartNumber').text = i['name'].split('/')[-1]
            SubElement(part_elem, 'LastModified').text = \
                i['last_modified'][:-3] + 'Z'
            SubElement(part_elem, 'ETag').text = '"%s"' % i['hash']
            SubElement(part_elem, 'Size').text = str(i['bytes'])

        body = tostring(result_elem, encoding_type=encoding_type)

        return HTTPOk(body=body, content_type='application/xml')
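
A rough standalone sketch (with hypothetical object names) of the part filtering above: entries whose basename is not a part number greater than part-number-marker are skipped, and the survivors are sorted numerically so that, for example, part 10 follows part 3 rather than preceding it lexicographically.

# Hypothetical illustration of the part-number filtering and numeric sort
# performed above.
import os

part_num_marker = 1
objects = [
    {'name': 'photo.jpg/upload-id/3'},
    {'name': 'photo.jpg/upload-id/10'},
    {'name': 'photo.jpg/upload-id/1'},         # not past the marker, dropped
    {'name': 'photo.jpg/upload-id/manifest'},  # non-numeric basename, dropped
]

def filter_part_num_marker(o):
    try:
        return int(os.path.basename(o['name'])) > part_num_marker
    except ValueError:
        return False

obj_list = sorted(filter(filter_part_num_marker, objects),
                  key=lambda o: int(o['name'].split('/')[-1]))
print([o['name'].split('/')[-1] for o in obj_list])
# -> ['3', '10']  (a plain string sort would have put '10' first)
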
Code example #3
File: bucket.py  Project: newtoncorp/swift3
    def GET(self, req):
        """
        Handle GET Bucket (List Objects) request
        """

        max_keys = req.get_validated_param('max-keys', CONF.max_bucket_listing)
        # TODO: Separate max_bucket_listing and default_bucket_listing
        tag_max_keys = max_keys
        max_keys = min(max_keys, CONF.max_bucket_listing)

        encoding_type = req.params.get('encoding-type')
        if encoding_type is not None and encoding_type != 'url':
            err_msg = 'Invalid Encoding Method specified in Request'
            raise InvalidArgument('encoding-type', encoding_type, err_msg)

        query = {
            'format': 'json',
            'limit': max_keys + 1,
        }
        if 'marker' in req.params:
            query.update({'marker': req.params['marker']})
        if 'prefix' in req.params:
            query.update({'prefix': req.params['prefix']})
        if 'delimiter' in req.params:
            query.update({'delimiter': req.params['delimiter']})

        # GET Bucket (List Objects) Version 2 parameters
        is_v2 = int(req.params.get('list-type', '1')) == 2
        fetch_owner = False
        if is_v2:
            if 'start-after' in req.params:
                query.update({'marker': req.params['start-after']})
            # continuation-token overrides start-after
            if 'continuation-token' in req.params:
                decoded = b64decode(req.params['continuation-token'])
                query.update({'marker': decoded})
            if 'fetch-owner' in req.params:
                fetch_owner = config_true_value(req.params['fetch-owner'])

        resp = req.get_response(self.app, query=query)

        objects = json.loads(resp.body)

        if 'versions' in req.params:
            req.container_name += VERSIONING_SUFFIX
            query['reverse'] = 'true'
            try:
                resp = req.get_response(self.app, query=query)
                versioned_objects = json.loads(resp.body)
                prefixes = set()
                for o in versioned_objects:
                    if 'name' in o:
                        # The name looks like this:
                        #  '%03x%s/%s' % (len(name), name, version)
                        o['name'], o['version_id'] = \
                            o['name'][3:].rsplit('/', 1)
                    else:
                        prefixes.add(o['subdir'])
                # suppress duplicated prefixes
                for o in list(objects):
                    if 'subdir' in o and o['subdir'] in prefixes:
                        objects.remove(o)
                objects.extend(versioned_objects)
            except NoSuchBucket:
                # the bucket may not be versioned
                pass
            req.container_name = req.container_name[:-len(VERSIONING_SUFFIX)]
            objects.sort(key=lambda o: o.get('name') or o.get('subdir'))
            for o in objects:
                if 'subdir' not in o and not o.get('version_id'):
                    info = req.get_object_info(
                        self.app, object_name=o['name'].encode('utf-8'))
                    o['sysmeta_version_id'] = info.get('sysmeta', {}).get(
                        'version-id', 'null')

        if 'versions' in req.params:
            elem = Element('ListVersionsResult')
        else:
            elem = Element('ListBucketResult')

        if encoding_type is not None:
            elem.encoding_type = encoding_type

        SubElement(elem, 'Name').text = req.container_name
        SubElement(elem, 'Prefix').text = req.params.get('prefix')

        # Filter objects according to version-id-marker and key-marker
        v_marker = req.params.get('version-id-marker')
        k_marker = req.params.get('key-marker')
        k_marker_matched = not bool(k_marker)
        if 'versions' in req.params and (v_marker or k_marker):
            to_delete = []
            for i, o in enumerate(objects):
                if 'subdir' not in o:
                    version_id = o.get('version_id',
                                       o.get('sysmeta_version_id', 'null'))

                    if not k_marker_matched and k_marker != o['name']:
                        to_delete.append(i)
                    if k_marker == o['name']:
                        k_marker_matched = True

                    if k_marker == o['name'] and v_marker:

                        if v_marker == version_id:
                            v_marker = None
                        to_delete.append(i)
            for i in reversed(to_delete):
                objects.pop(i)

        # To decide whether the listing is truncated, check whether the
        # (max_keys + 1)-th element exists in swift.
        is_truncated = max_keys > 0 and len(objects) > max_keys
        objects = objects[:max_keys]

        if not is_v2:
            if 'versions' in req.params:
                SubElement(elem,
                           'KeyMarker').text = req.params.get('key-marker')
                SubElement(elem, 'VersionIdMarker').text = req.params.get(
                    'version-id-marker')
            else:
                SubElement(elem, 'Marker').text = req.params.get('marker')
            if is_truncated and 'delimiter' in req.params:
                if 'name' in objects[-1]:
                    SubElement(elem, 'NextMarker').text = \
                        objects[-1]['name']
                if 'subdir' in objects[-1]:
                    SubElement(elem, 'NextMarker').text = \
                        objects[-1]['subdir']
        else:
            if is_truncated:
                if 'name' in objects[-1]:
                    SubElement(elem, 'NextContinuationToken').text = \
                        b64encode(objects[-1]['name'].encode('utf8'))
                if 'subdir' in objects[-1]:
                    SubElement(elem, 'NextContinuationToken').text = \
                        b64encode(objects[-1]['subdir'].encode('utf8'))
            if 'continuation-token' in req.params:
                SubElement(elem, 'ContinuationToken').text = \
                    req.params['continuation-token']
            if 'start-after' in req.params:
                SubElement(elem, 'StartAfter').text = \
                    req.params['start-after']
            SubElement(elem, 'KeyCount').text = str(len(objects))

        SubElement(elem, 'MaxKeys').text = str(tag_max_keys)

        if 'delimiter' in req.params:
            SubElement(elem, 'Delimiter').text = req.params['delimiter']

        if encoding_type is not None:
            SubElement(elem, 'EncodingType').text = encoding_type

        SubElement(elem, 'IsTruncated').text = \
            'true' if is_truncated else 'false'

        for o in objects:
            if 'subdir' not in o:
                if 'versions' in req.params:
                    version_id = o.get('version_id',
                                       o.get('sysmeta_version_id', 'null'))

                    if o.get('content_type') == DELETE_MARKER_CONTENT_TYPE:
                        contents = SubElement(elem, 'DeleteMarker')
                    else:
                        contents = SubElement(elem, 'Version')
                    SubElement(contents, 'Key').text = \
                        o['name'].encode('utf-8')
                    SubElement(contents, 'VersionId').text = version_id
                    SubElement(
                        contents,
                        'IsLatest').text = str('version_id' not in o).lower()
                else:
                    contents = SubElement(elem, 'Contents')
                    SubElement(contents, 'Key').text = \
                        o['name'].encode('utf-8')
                SubElement(contents, 'LastModified').text = \
                    o['last_modified'][:-3] + 'Z'
                if 's3_etag' in o.get('content_type', ''):
                    _, o['hash'] = extract_s3_etag(o['content_type'])
                if contents.tag != 'DeleteMarker':
                    SubElement(contents, 'ETag').text = '"%s"' % o['hash']
                    SubElement(contents, 'Size').text = str(o['bytes'])
                if fetch_owner or not is_v2:
                    owner = SubElement(contents, 'Owner')
                    SubElement(owner, 'ID').text = req.user_id
                    SubElement(owner, 'DisplayName').text = req.user_id
                if contents.tag != 'DeleteMarker':
                    SubElement(contents, 'StorageClass').text = 'STANDARD'

        for o in objects:
            if 'subdir' in o:
                common_prefixes = SubElement(elem, 'CommonPrefixes')
                SubElement(common_prefixes, 'Prefix').text = \
                    o['subdir'].encode('utf-8')

        body = tostring(elem, encoding_type=encoding_type)

        resp = HTTPOk(body=body, content_type='application/xml')

        origin = req.headers.get('Origin')
        if origin:
            rule = get_cors(self.app, req, "GET", origin)
            if rule:
                cors_fill_headers(req, resp, rule)

        return resp
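
A small sketch of the List Objects v2 continuation-token round trip used above (the object name is made up): NextContinuationToken is simply the base64-encoded name of the last key returned, and on the next request it is decoded back into the plain 'marker' passed to the backend listing.

# Sketch of the v2 continuation-token round trip; the key is hypothetical.
from base64 import b64decode, b64encode

last_key = u'photos/2024/cat.jpg'

# What the handler above emits as <NextContinuationToken>.
token = b64encode(last_key.encode('utf8')).decode('ascii')

# On the next request (?list-type=2&continuation-token=<token>) the handler
# decodes the token and uses the result as the backend 'marker' parameter.
marker = b64decode(token).decode('utf8')

assert marker == last_key
print(token, marker)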