def _manifest_get_response(self, req, content_length, response_headers,
                           segments):
    """Build the GET response for an SLO manifest by streaming segments.

    Records a single requested byte range (if any) on
    ``self.first_byte``/``self.last_byte``, then wraps the segment
    listing in a rate-limited SegmentedIterable.  Returns an error
    response for unsatisfiable ranges or broken manifests.
    """
    self.first_byte, self.last_byte = None, None
    if req.range:
        ranges = req.range.ranges_for_length(content_length)
        if not ranges:
            return HTTPRequestedRangeNotSatisfiable(request=req)
        if len(ranges) == 1:
            self.first_byte, self.last_byte = ranges[0]
            # swob.Range.ranges_for_length reports the end as one past
            # the last byte; bring it back to an inclusive index.
            self.last_byte -= 1
        else:
            # Multiple ranges are not supported: serve the whole object.
            req.range = None

    ver, account, _junk = req.split_path(3, 3, rest_with_last=True)
    listing = self._segment_listing_iterator(req, ver, account, segments)
    throttled_listing = RateLimitedIterator(
        listing,
        self.slo.rate_limit_segments_per_sec,
        limit_after=self.slo.rate_limit_after_segment)

    # The listing iterator yields (segment dict, start byte, end byte);
    # SegmentedIterable wants (obj path, etag, size, start byte,
    # end byte) tuples, so reshape on the fly.
    seg_iter = (
        ("/{ver}/{acc}/{conobj}".format(
            ver=ver, acc=account, conobj=seg['name'].lstrip('/')),
         seg['hash'], int(seg['bytes']), start, end)
        for seg, start, end in throttled_listing)

    segmented_iter = SegmentedIterable(
        req, self.slo.app, seg_iter,
        name=req.path, logger=self.slo.logger,
        ua_suffix="SLO MultipartGET",
        swift_source="SLO",
        max_get_time=self.slo.max_get_time)
    try:
        segmented_iter.validate_first_segment()
    except (ListingIterError, SegmentError):
        # If any segment from the manifest is missing, or its
        # Etag/Content-Length no longer matches, the connection would
        # drop mid-stream; report 409 Conflict up front instead.
        return HTTPConflict(request=req)

    response = Response(request=req, content_length=content_length,
                        headers=response_headers,
                        conditional_response=True,
                        app_iter=segmented_iter)
    if req.range:
        # A ranged response covers only part of the object, so the
        # manifest's Etag would not match the bytes actually returned.
        response.headers.pop('Etag')
    return response
def PUT(self, request):
    """Delegate object PUT, mapping filesystem name clashes to 409."""
    try:
        return server.ObjectController.PUT(self, request)
    except (AlreadyExistsAsFile, AlreadyExistsAsDir):
        # The path already exists as the other kind of node on disk.
        drive = split_and_validate_path(request, 1, 5, True)
        return HTTPConflict(drive=drive, request=request)
def DELETE(self, req):
    """Handle HTTP DELETE for a container, or for one of its object rows."""
    drive, part, account, container, obj = split_and_validate_path(
        req, 4, 5, True)
    req_timestamp = valid_timestamp(req)
    try:
        check_drive(self.root, drive, self.mount_check)
    except ValueError:
        return HTTPInsufficientStorage(drive=drive, request=req)
    # The policy index only matters for object deletion (and,
    # transitively, for auto-created accounts).
    obj_policy_index = self.get_and_validate_policy_index(req) or 0
    broker = self._get_container_broker(drive, part, account, container)
    if account.startswith(self.auto_create_account_prefix) and obj and \
            not os.path.exists(broker.db_file):
        try:
            broker.initialize(req_timestamp.internal, obj_policy_index)
        except DatabaseAlreadyExists:
            pass
    if not os.path.exists(broker.db_file):
        return HTTPNotFound()
    if obj:
        # Object delete: honour an existing shard range first.
        redirect = self._redirect_to_shard(req, broker, obj)
        if redirect:
            return redirect
        broker.delete_object(obj, req.headers.get('x-timestamp'),
                             obj_policy_index)
        return HTTPNoContent(request=req)
    # Container delete.
    if not broker.empty():
        return HTTPConflict(request=req)
    existed = Timestamp(broker.get_info()['put_timestamp']) and \
        not broker.is_deleted()
    broker.delete_db(req_timestamp.internal)
    if not broker.is_deleted():
        return HTTPConflict(request=req)
    self._update_sync_store(broker, 'DELETE')
    resp = self.account_update(req, account, container, broker)
    if resp:
        return resp
    return HTTPNoContent(request=req) if existed else HTTPNotFound()
def PUT(self, request):
    """Handle object PUT with swiftonfile's creation constraints.

    NOTE(review): this reassigns ``server.check_object_creation`` on
    every request (module-level monkey-patch); kept as-is to preserve
    behavior.
    """
    try:
        server.check_object_creation = \
            swiftonfile.swift.common.constraints.sof_check_object_creation
        return server.ObjectController.PUT(self, request)
    except (AlreadyExistsAsFile, AlreadyExistsAsDir):
        # The path already exists as the other kind of node on disk.
        drive = split_and_validate_path(request, 1, 5, True)
        return HTTPConflict(drive=drive, request=request)
def PUT(self, req):
    """Handle HTTP PUT: create/update an account, or record a container."""
    drive, part, account, container = split_and_validate_path(req, 3, 4)
    if self.mount_check and not check_mount(self.root, drive):
        return HTTPInsufficientStorage(drive=drive, request=req)
    if container:
        # Record (or update) a container row under this account.
        timeout = 3 if 'x-trans-id' in req.headers else None
        broker = self._get_account_broker(drive, part, account,
                                          pending_timeout=timeout)
        if account.startswith(self.auto_create_account_prefix) and \
                not os.path.exists(broker.db_file):
            try:
                broker.initialize(normalize_timestamp(
                    req.headers.get('x-timestamp') or time.time()))
            except DatabaseAlreadyExists:
                pass
        if req.headers.get('x-account-override-deleted', 'no').lower() != \
                'yes' and broker.is_deleted():
            return HTTPNotFound(request=req)
        broker.put_container(container, req.headers['x-put-timestamp'],
                             req.headers['x-delete-timestamp'],
                             req.headers['x-object-count'],
                             req.headers['x-bytes-used'])
        # Normalized timestamps compare correctly as strings.
        if req.headers['x-delete-timestamp'] > \
                req.headers['x-put-timestamp']:
            return HTTPNoContent(request=req)
        return HTTPCreated(request=req)
    # Create or update the account database itself.
    broker = self._get_account_broker(drive, part, account)
    timestamp = normalize_timestamp(req.headers['x-timestamp'])
    if not os.path.exists(broker.db_file):
        try:
            broker.initialize(timestamp)
            created = True
        except DatabaseAlreadyExists:
            created = False
    elif broker.is_status_deleted():
        return self._deleted_response(broker, req, HTTPForbidden,
                                      body='Recently deleted')
    else:
        # A deleted-but-present DB counts as a (re)creation.
        created = broker.is_deleted()
    broker.update_put_timestamp(timestamp)
    if broker.is_deleted():
        return HTTPConflict(request=req)
    metadata = {}
    metadata.update((key, (value, timestamp))
                    for key, value in req.headers.iteritems()
                    if key.lower().startswith('x-account-meta-'))
    if metadata:
        broker.update_metadata(metadata)
    return HTTPCreated(request=req) if created else HTTPAccepted(request=req)
def get_container_delete_resp(self, req):
    """Delete the container via oio-sds; 409 if it still holds objects."""
    oio_headers = {REQID_HEADER: self.trans_id}
    try:
        self.app.storage.container_delete(self.account_name,
                                          self.container_name,
                                          headers=oio_headers)
    except exceptions.ContainerNotEmpty:
        return HTTPConflict(request=req)
    return HTTPNoContent(request=req)
def __call__(self, env, start_response):
    """Fake WSGI app: respond according to the preset ``self.status``."""
    method = env['REQUEST_METHOD']
    if method == 'GET' and self.status == 200:
        start_response(Response().status, [('Content-Type', 'text/xml')])
        entry_tmpl = '{"name":%s,"last_modified":%s,"hash":%s,"bytes":%s}'
        entries = []
        for item in self.objects:
            entries.append(entry_tmpl % (simplejson.dumps(item[0]),
                                         simplejson.dumps(item[1]),
                                         item[2], item[3]))
        # NOTE(review): returns a plain string body, exactly as before.
        return '[' + ','.join(entries) + ']'
    # Per-method status -> response-class dispatch; anything unlisted
    # for a known method becomes 400 Bad Request.
    status_map = {
        'GET': {401: HTTPUnauthorized, 403: HTTPForbidden,
                404: HTTPNotFound},
        'PUT': {201: HTTPCreated, 202: HTTPAccepted,
                401: HTTPUnauthorized, 403: HTTPForbidden},
        'POST': {204: HTTPNoContent},
        'DELETE': {204: HTTPNoContent, 401: HTTPUnauthorized,
                   403: HTTPForbidden, 404: HTTPNotFound,
                   409: HTTPConflict},
    }
    handlers = status_map.get(method)
    if handlers is not None:
        resp_cls = handlers.get(self.status, HTTPBadRequest)
        start_response(resp_cls().status, [])
    return []
def DELETE(self, req):
    """Handle HTTP DELETE for a container or object (x-timestamp era)."""
    drive, part, account, container, obj = split_and_validate_path(
        req, 4, 5, True)
    if 'x-timestamp' not in req.headers or \
            not check_float(req.headers['x-timestamp']):
        return HTTPBadRequest(body='Missing timestamp', request=req,
                              content_type='text/plain')
    if self.mount_check and not check_mount(self.root, drive):
        return HTTPInsufficientStorage(drive=drive, request=req)
    broker = self._get_container_broker(drive, part, account, container)
    if account.startswith(self.auto_create_account_prefix) and obj and \
            not os.path.exists(broker.db_file):
        # Auto-create accounts get their container DB made on demand.
        requested_policy_index = (self.get_and_validate_policy_index(req)
                                  or POLICIES.default.idx)
        try:
            broker.initialize(
                normalize_timestamp(
                    req.headers.get('x-timestamp') or time.time()),
                requested_policy_index)
        except DatabaseAlreadyExists:
            pass
    if not os.path.exists(broker.db_file):
        return HTTPNotFound()
    if obj:
        # Object delete.
        broker.delete_object(obj, req.headers.get('x-timestamp'))
        return HTTPNoContent(request=req)
    # Container delete.
    if not broker.empty():
        return HTTPConflict(request=req)
    existed = float(broker.get_info()['put_timestamp']) and \
        not broker.is_deleted()
    broker.delete_db(req.headers['X-Timestamp'])
    if not broker.is_deleted():
        return HTTPConflict(request=req)
    resp = self.account_update(req, account, container, broker)
    if resp:
        return resp
    return HTTPNoContent(request=req) if existed else HTTPNotFound()
def get_container_delete_resp(self, req, headers):
    """Delete the container; 409 if not empty, 404 if it doesn't exist."""
    oio_headers = {'X-oio-req-id': self.trans_id}
    try:
        self.app.storage.container_delete(
            self.account_name, self.container_name, headers=oio_headers)
    except exceptions.ContainerNotEmpty:
        return HTTPConflict(request=req)
    except exceptions.NoSuchContainer:
        return HTTPNotFound(request=req)
    return HTTPNoContent(request=req)
def _update_or_create(self, req, broker, timestamp, new_container_policy,
                      requested_policy_index):
    """
    Create new database broker or update timestamps for existing database.

    :param req: the swob request object
    :param broker: the broker instance for the container
    :param timestamp: internalized timestamp
    :param new_container_policy: the storage policy index to use when
                                 creating the container
    :param requested_policy_index: the storage policy index sent in the
                                   request, may be None
    :returns: created, a bool, if database did not previously exist
    """
    if not os.path.exists(broker.db_file):
        try:
            broker.initialize(timestamp, new_container_policy)
        except DatabaseAlreadyExists:
            pass
        else:
            # Fresh database: nothing else to reconcile.
            return True  # created
    recreated = broker.is_deleted()
    if recreated:
        # Only a deleted container may have its policy (re)assigned.
        broker.set_storage_policy_index(new_container_policy,
                                        timestamp=timestamp)
    elif requested_policy_index is not None:
        # An explicit policy on a live container must match what it has.
        if requested_policy_index != broker.storage_policy_index:
            raise HTTPConflict(request=req, headers={
                'x-backend-storage-policy-index':
                    broker.storage_policy_index})
    broker.update_put_timestamp(timestamp)
    if broker.is_deleted():
        raise HTTPConflict(request=req)
    if recreated:
        broker.update_status_changed_at(timestamp)
    return recreated
def get_container_delete_resp(self, req):
    """Delete the container, forwarding cache/perfdata hints from the env."""
    oio_headers = {REQID_HEADER: self.trans_id}
    oio_cache = req.environ.get('oio.cache')
    perfdata = req.environ.get('oio.perfdata')
    try:
        self.app.storage.container_delete(
            self.account_name, self.container_name, headers=oio_headers,
            cache=oio_cache, perfdata=perfdata)
    except exceptions.ContainerNotEmpty:
        # Cannot delete a container that still holds objects.
        return HTTPConflict(request=req)
    return HTTPNoContent(request=req)
def handle_delete(self, req, start_response):
    """
    Handle request to delete a user's container.

    As part of deleting a container, this middleware will also delete
    the hidden container holding object versions.

    Before a user's container can be deleted, swift must check if there
    are still old object versions from that container.  Only after
    disabling versioning and deleting *all* object versions can a
    container be deleted.
    """
    container_info = get_container_info(req.environ, self.app,
                                        swift_source='OV')
    versions_cont = unquote(
        container_info.get('sysmeta', {}).get('versions-container', ''))
    if versions_cont:
        account = req.split_path(3, 3, True)[1]
        # A HEAD request (rather than get_container_info) guarantees an
        # up-to-date object count for the versions container.
        head_req = make_pre_authed_request(
            req.environ, method='HEAD', swift_source='OV',
            path=wsgi_quote('/v1/%s/%s' % (
                account, str_to_wsgi(versions_cont))),
            headers={'X-Backend-Allow-Reserved-Names': 'true'})
        head_resp = head_req.get_response(self.app)
        drain_and_close(head_resp)
        if head_resp.is_success and int(
                head_resp.headers.get('X-Container-Object-Count', 0)) > 0:
            raise HTTPConflict(
                'Delete all versions before deleting container.',
                request=req)
        if not head_resp.is_success and head_resp.status_int != 404:
            raise HTTPInternalServerError(
                'Error deleting versioned container')
        # Versions container is empty (or already gone): remove it,
        # reusing the pre-authed request with the method swapped.
        head_req.method = 'DELETE'
        del_resp = head_req.get_response(self.app)
        drain_and_close(del_resp)
        if not is_success(del_resp.status_int) and \
                del_resp.status_int != 404:
            raise HTTPInternalServerError(
                'Error deleting versioned container')
    app_resp = self._app_call(req.environ)
    start_response(self._response_status, self._response_headers,
                   self._response_exc_info)
    return app_resp
def POST(self, request):
    """Handle HTTP POST requests for the Swift Object Server.

    Rewrites an object's metadata in place.  Responds 202 on success,
    400 for a bad X-Delete-At, 404 if the object is gone or quarantined,
    409 (with X-Backend-Timestamp) if the request is not newer than the
    stored data, and 507 on device/xattr/space problems.
    """
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past', request=request,
                              content_type='text/plain')
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj,
            policy=policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        orig_metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined):
        return HTTPNotFound(request=request)
    orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
    if orig_timestamp >= req_timestamp:
        # Stale request: tell the caller what we already have on disk.
        return HTTPConflict(
            request=request,
            headers={'X-Backend-Timestamp': orig_timestamp.internal})
    metadata = {'X-Timestamp': req_timestamp.internal}
    self._preserve_slo_manifest(metadata, orig_metadata)
    metadata.update(val for val in request.headers.items()
                    if is_user_meta('object', val[0]))
    headers_to_copy = (
        request.headers.get(
            'X-Backend-Replication-Headers', '').split() +
        list(self.allowed_headers))
    for header_key in headers_to_copy:
        if header_key in request.headers:
            header_caps = header_key.title()
            metadata[header_caps] = request.headers[header_key]
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    if orig_delete_at != new_delete_at:
        # Keep the expirer queue in sync with the new X-Delete-At.
        if new_delete_at:
            self.delete_at_update('PUT', new_delete_at, account, container,
                                  obj, request, device, policy)
        if orig_delete_at:
            self.delete_at_update('DELETE', orig_delete_at, account,
                                  container, obj, request, device, policy)
    try:
        disk_file.write_metadata(metadata)
    except (DiskFileXattrNotSupported, DiskFileNoSpace):
        return HTTPInsufficientStorage(drive=device, request=request)
    # Fix: the response must be part of the return statement — a bare
    # ``return`` followed by ``HTTPAccepted(...)`` on the next line (as
    # the original was laid out) would return None and never build the
    # 202 response.
    return HTTPAccepted(request=request)
def _validate_etag_and_update_sysmeta(self, req, symlink_target_path,
                                      etag):
    """Verify the symlink target matches *etag* and stamp target sysmeta.

    Unless the symlink override is set, issues a HEAD subrequest on the
    target and copies its etag/size information into the request's
    symlink sysmeta headers.  Raises 409 if the target does not exist,
    or the mapped error for any other failure.
    """
    if req.environ.get('swift.symlink_override'):
        req.headers[TGT_ETAG_SYSMETA_SYMLINK_HDR] = etag
        req.headers[TGT_BYTES_SYSMETA_SYMLINK_HDR] = \
            req.headers[TGT_BYTES_SYMLINK_HDR]
        return

    # Make sure the Etag matches a real object via a HEAD subrequest.
    head_req = make_subrequest(
        req.environ, path=wsgi_quote(symlink_target_path),
        method='HEAD', swift_source='SYM')
    if req.allow_reserved_names:
        head_req.headers['X-Backend-Allow-Reserved-Names'] = 'true'
    self._last_target_path = symlink_target_path
    resp = self._recursive_get_head(head_req, target_etag=etag,
                                    follow_softlinks=False)
    if self._get_status_int() == HTTP_NOT_FOUND:
        raise HTTPConflict(body='X-Symlink-Target does not exist',
                           request=req,
                           headers={
                               'Content-Type': 'text/plain',
                               'Content-Location': self._last_target_path})
    if not is_success(self._get_status_int()):
        drain_and_close(resp)
        raise status_map[self._get_status_int()](request=req)

    response_headers = HeaderKeyDict(self._response_headers)
    # Carry forward any etag update params (e.g. "slo_etag"); the caller
    # appends symlink_target_* params to this header afterwards.
    override_header = get_container_update_override_key('etag')
    if override_header in response_headers and \
            override_header not in req.headers:
        sep, params = \
            response_headers[override_header].partition(';')[1:]
        req.headers[override_header] = MD5_OF_EMPTY_STRING + sep + params
    # It's troublesome that there's so much leakage with SLO
    if 'X-Object-Sysmeta-Slo-Etag' in response_headers and \
            override_header not in req.headers:
        req.headers[override_header] = '%s; slo_etag=%s' % (
            MD5_OF_EMPTY_STRING,
            response_headers['X-Object-Sysmeta-Slo-Etag'])
    req.headers[TGT_BYTES_SYSMETA_SYMLINK_HDR] = (
        response_headers.get('x-object-sysmeta-slo-size') or
        response_headers['Content-Length'])
    req.headers[TGT_ETAG_SYSMETA_SYMLINK_HDR] = etag
    if not req.headers.get('Content-Type'):
        req.headers['Content-Type'] = response_headers['Content-Type']
def _update_or_create(self, req, broker, timestamp):
    """Initialize the DB if absent, otherwise bump its put timestamp.

    :returns: True if the database was created (or resurrected from a
              deleted state), False otherwise
    :raises HTTPConflict: if the broker is still deleted afterwards
    """
    if not os.path.exists(broker.db_file):
        try:
            broker.initialize(timestamp)
        except DatabaseAlreadyExists:
            pass
        else:
            return True  # created
    resurrected = broker.is_deleted()
    broker.update_put_timestamp(timestamp)
    if broker.is_deleted():
        raise HTTPConflict(request=req)
    return resurrected
def POST(self, request):
    """Handle HTTP POST requests for the Swift Object Server (py2 era)."""
    device, partition, account, container, obj = \
        split_and_validate_path(request, 5, 5, True)
    if 'x-timestamp' not in request.headers or \
            not check_float(request.headers['x-timestamp']):
        return HTTPBadRequest(body='Missing timestamp', request=request,
                              content_type='text/plain')
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past', request=request,
                              content_type='text/plain')
    try:
        disk_file = self._diskfile(device, partition, account, container,
                                   obj)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    with disk_file.open():
        if disk_file.is_deleted() or disk_file.is_expired():
            return HTTPNotFound(request=request)
        try:
            disk_file.get_data_file_size()
        except (DiskFileError, DiskFileNotExist):
            # Size mismatch etc.: quarantine and report missing.
            disk_file.quarantine()
            return HTTPNotFound(request=request)
        orig_metadata = disk_file.get_metadata()
    # Normalized timestamps compare correctly as strings.
    orig_timestamp = orig_metadata.get('X-Timestamp', '0')
    if orig_timestamp >= request.headers['x-timestamp']:
        return HTTPConflict(request=request)
    metadata = {'X-Timestamp': request.headers['x-timestamp']}
    metadata.update(val for val in request.headers.iteritems()
                    if val[0].startswith('X-Object-Meta-'))
    for header_key in self.allowed_headers:
        if header_key in request.headers:
            metadata[header_key.title()] = request.headers[header_key]
    old_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    if old_delete_at != new_delete_at:
        # Keep the expirer queue in sync with the new X-Delete-At.
        if new_delete_at:
            self.delete_at_update('PUT', new_delete_at, account,
                                  container, obj, request, device)
        if old_delete_at:
            self.delete_at_update('DELETE', old_delete_at, account,
                                  container, obj, request, device)
    disk_file.put_metadata(metadata)
    return HTTPAccepted(request=request)
def handle_get_head(self, req):
    """
    Handle get/head request and in case the response is a symlink,
    redirect request to target object.

    :param req: HTTP GET or HEAD object request
    :returns: Response Iterator
    """
    try:
        return self._recursive_get_head(req)
    except LinkIterError:
        # Symlink chain exceeded the configured depth.
        raise HTTPConflict(
            body='Too many levels of symbolic links, '
                 'maximum allowed is %d' % self.symloop_max,
            request=req, content_type='text/plain')
def PUT(self, request):
    """Handle object PUT; pre-creates segment dirs for S3 multipart."""
    try:
        # Hack for multi-part support: an empty-body PUT carrying an AWS
        # Authorization header into a "+segments" container is really a
        # directory-creation request, so force the directory type.
        content_length = int(request.headers.get('Content-Length', -1))
        authorization = request.headers.get('Authorization', '')
        if content_length == 0 and 'AWS' in authorization:
            device, partition, account, container, obj, policy = \
                get_name_and_placement(request, 5, 5, True)
            if container.endswith("+segments"):
                request.headers["Content-Type"] = 'application/directory'
        # Now delegate to swift's PUT.
        return server.ObjectController.PUT(self, request)
    except (AlreadyExistsAsFile, AlreadyExistsAsDir):
        drive = split_and_validate_path(request, 1, 5, True)
        return HTTPConflict(drive=drive, request=request)
def _update_or_create(self, req, broker, timestamp, new_container_policy):
    """Initialize the container DB if absent, else refresh its timestamps.

    :returns: True if the database was created, or was deleted and is
              being resurrected
    :raises HTTPConflict: if the broker is still deleted afterwards
    """
    if not os.path.exists(broker.db_file):
        try:
            broker.initialize(timestamp, new_container_policy)
        except DatabaseAlreadyExists:
            pass
        else:
            return True  # created
    recreated = broker.is_deleted()
    if recreated:
        # Only deleted containers may have their policy (re)assigned.
        broker.set_storage_policy_index(new_container_policy)
    broker.update_put_timestamp(timestamp)
    if broker.is_deleted():
        raise HTTPConflict(request=req)
    return recreated
def PUT(self, request):
    """Handle object PUT, enforcing swiftonfile's creation constraints."""
    try:
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        # swiftonfile constraints come first; a failure short-circuits.
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        # Now delegate to swift's PUT.
        return server.ObjectController.PUT(self, request)
    except (AlreadyExistsAsFile, AlreadyExistsAsDir):
        drive = split_and_validate_path(request, 1, 5, True)
        return HTTPConflict(drive=drive, request=request)
def PUT(self, req):
    """
    Handles incoming PUT requests.

    Crawlers running on the object/container/account servers send over
    new metadata; this is where that metadata reaches the database.
    """
    broker = self._get_metadata_broker()
    if 'user-agent' not in req.headers:
        return HTTPBadRequest(body='No user agent specified',
                              request=req, content_type='text/plain')
    md_type = req.headers['user-agent']
    md_data = json.loads(req.body)
    if not os.path.exists(broker.db_file):
        try:
            broker.initialize(time.time())
        except DatabaseAlreadyExists:
            pass
    elif broker.is_deleted(md_type):
        return HTTPConflict(request=req)
    # Dispatch on the crawler type advertised in the user agent.
    if md_type == 'account_crawler':
        broker.insert_account_md(md_data)
    elif md_type == 'container_crawler':
        broker.insert_container_md(md_data)
    elif md_type == 'object_crawler':
        broker.insert_object_md(md_data)
    else:
        return HTTPBadRequest(body='Invalid user agent', request=req,
                              content_type='text/plain')
    return HTTPNoContent(request=req)
def POST(self, request):
    """Handle HTTP POST requests for the Swift Object Server."""
    device, partition, account, container, obj, policy_idx = \
        get_name_and_placement(request, 5, 5, True)
    if 'x-timestamp' not in request.headers or \
            not check_float(request.headers['x-timestamp']):
        return HTTPBadRequest(body='Missing timestamp', request=request,
                              content_type='text/plain')
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past', request=request,
                              content_type='text/plain')
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj,
            policy_idx=policy_idx)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        orig_metadata = disk_file.read_metadata()
    except (DiskFileNotExist, DiskFileQuarantined):
        return HTTPNotFound(request=request)
    # Normalized timestamps compare correctly as strings.
    orig_timestamp = orig_metadata.get('X-Timestamp', '0')
    if orig_timestamp >= request.headers['x-timestamp']:
        return HTTPConflict(request=request)
    metadata = {'X-Timestamp': request.headers['x-timestamp']}
    metadata.update(val for val in request.headers.iteritems()
                    if is_user_meta('object', val[0]))
    for header_key in self.allowed_headers:
        if header_key in request.headers:
            metadata[header_key.title()] = request.headers[header_key]
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    if orig_delete_at != new_delete_at:
        # Keep the expirer queue in sync with the new X-Delete-At.
        if new_delete_at:
            self.delete_at_update('PUT', new_delete_at, account,
                                  container, obj, request, device)
        if orig_delete_at:
            self.delete_at_update('DELETE', orig_delete_at, account,
                                  container, obj, request, device)
    disk_file.write_metadata(metadata)
    return HTTPAccepted(request=request)
def _store_object(self, req, data_source, headers):
    """Upload *data_source* to oio-sds and return the 201 response.

    Resolves the storage policy (explicit header, backend index, or
    auto-selected by size), forwards versioning/bucket hints, then maps
    oio-sds exceptions onto swob responses.
    """
    kwargs = {}
    content_type = req.headers.get('content-type', 'octet/stream')
    container_info = self.container_info(self.account_name,
                                         self.container_name, req)
    # --- resolve the storage policy -------------------------------------
    policy = None
    if 'X-Oio-Storage-Policy' in req.headers:
        policy = req.headers.get('X-Oio-Storage-Policy')
        if not self.app.POLICIES.get_by_name(policy):
            raise HTTPBadRequest(
                "invalid policy '%s', must be in %s" %
                (policy, self.app.POLICIES.by_name.keys()))
    else:
        try:
            policy_index = int(
                req.headers.get('X-Backend-Storage-Policy-Index',
                                container_info['storage_policy']))
        except TypeError:
            policy_index = 0
        if policy_index != 0:
            policy = self.app.POLICIES.get_by_index(policy_index).name
        else:
            # Default policy: pick automatically from the object size.
            content_length = int(req.headers.get('content-length', 0))
            policy = self._get_auto_policy_from_size(content_length)

    ct_props = {'properties': {}, 'system': {}}
    metadata = self.load_object_metadata(headers)
    oio_headers = {REQID_HEADER: self.trans_id}
    oio_cache = req.environ.get('oio.cache')
    # NOTE(review): other methods in this file read 'oio.perfdata';
    # this one reads 'swift.perfdata' — confirm which key is intended.
    perfdata = req.environ.get('swift.perfdata')
    # only send headers if needed
    if SUPPORT_VERSIONING and headers.get(FORCEVERSIONING_HEADER):
        oio_headers[FORCEVERSIONING_HEADER] = \
            headers.get(FORCEVERSIONING_HEADER)
    if req.environ.get('oio.force-version'):
        # For MPU, this carries the UploadId's version so the MPU's
        # version-id can appear in the S3 response.
        kwargs['version'] = req.environ.get('oio.force-version')
    # When a shard is being created, save the S3 bucket name in a
    # container property; it is used when aggregating container
    # statistics into bucket statistics.
    if BUCKET_NAME_HEADER in headers:
        bname = headers[BUCKET_NAME_HEADER]
        # FIXME(FVE): the segments container is not part of another
        # bucket!  We should not have to strip this here.
        if bname and bname.endswith(MULTIUPLOAD_SUFFIX):
            bname = bname[:-len(MULTIUPLOAD_SUFFIX)]
        ct_props['system'][BUCKET_NAME_PROP] = bname
    try:
        _chunks, _size, checksum, _meta = self._object_create(
            self.account_name, self.container_name,
            obj_name=self.object_name, file_or_path=data_source,
            mime_type=content_type, policy=policy, headers=oio_headers,
            etag=req.headers.get('etag', '').strip('"'),
            properties=metadata, container_properties=ct_props,
            cache=oio_cache, perfdata=perfdata, **kwargs)
        # TODO(FVE): when oio-sds supports it, do that in a callback
        # passed to object_create (or whatever upload method supports it)
        footer_md = self.load_object_metadata(self._get_footers(req))
        if footer_md:
            self.app.storage.object_set_properties(
                self.account_name, self.container_name, self.object_name,
                version=_meta.get('version', None), properties=footer_md,
                headers=oio_headers, cache=oio_cache, perfdata=perfdata)
    except exceptions.Conflict:
        raise HTTPConflict(request=req)
    except exceptions.PreconditionFailed:
        raise HTTPPreconditionFailed(request=req)
    except SourceReadTimeout as err:
        self.app.logger.warning(_('ERROR Client read timeout (%s)'), err)
        self.app.logger.increment('client_timeouts')
        raise HTTPRequestTimeout(request=req)
    except exceptions.SourceReadError:
        req.client_disconnect = True
        self.app.logger.warning(
            _('Client disconnected without sending last chunk'))
        self.app.logger.increment('client_disconnects')
        raise HTTPClientDisconnect(request=req)
    except exceptions.EtagMismatch:
        return HTTPUnprocessableEntity(request=req)
    except (exceptions.ServiceBusy, exceptions.OioTimeout,
            exceptions.DeadlineReached):
        raise
    except exceptions.NoSuchContainer:
        raise HTTPNotFound(request=req)
    except exceptions.ClientException as err:
        # 481 = CODE_POLICY_NOT_SATISFIABLE
        if err.status == 481:
            raise exceptions.ServiceBusy()
        self.app.logger.exception(
            _('ERROR Exception transferring data %s'), {'path': req.path})
        raise HTTPInternalServerError(request=req)
    except Exception:
        self.app.logger.exception(
            _('ERROR Exception transferring data %s'), {'path': req.path})
        raise HTTPInternalServerError(request=req)

    last_modified = int(_meta.get('mtime', math.ceil(time.time())))
    # FIXME(FVE): if \x10 character in object name, decode version
    # number and set it in the response headers, instead of the oio
    # version number.
    version_id = _meta.get('version', 'null')
    return HTTPCreated(
        request=req, etag=checksum, last_modified=last_modified,
        headers={'x-object-sysmeta-version-id': version_id})
def _link_object(self, req):
    """Create the new object as a server-side link to an existing one.

    The source comes from the ``Oio-Copy-From`` header (and optionally
    ``X-Copy-From-Account``).  Range and SLO fast-copies are rejected.
    oio-sds exceptions are mapped onto swob responses.
    """
    _, container, obj = req.headers['Oio-Copy-From'].split('/', 2)
    from_account = req.headers.get('X-Copy-From-Account',
                                   self.account_name)
    self.app.logger.info(
        "Creating link from %s/%s/%s to %s/%s/%s",
        # Existing
        from_account, container, obj,
        # New
        self.account_name, self.container_name, self.object_name)
    storage = self.app.storage

    if req.headers.get('Range'):
        raise Exception("Fast Copy with Range is unsupported")
        # NOTE(review): everything below this raise is unreachable;
        # kept as-is to preserve the original behavior exactly.
        ranges = ranges_from_http_header(req.headers.get('Range'))
        if len(ranges) != 1:
            raise HTTPInternalServerError(
                request=req, body="mutiple ranges unsupported")
        ranges = ranges[0]
    else:
        ranges = None

    headers = self._prepare_headers(req)
    metadata = self.load_object_metadata(headers)
    oio_headers = {REQID_HEADER: self.trans_id}
    oio_cache = req.environ.get('oio.cache')
    perfdata = req.environ.get('swift.perfdata')
    # FIXME(FVE): use object_show, cache in req.environ
    version = obj_version_from_env(req.environ)
    props = storage.object_get_properties(from_account, container, obj,
                                          headers=oio_headers,
                                          version=version,
                                          cache=oio_cache,
                                          perfdata=perfdata)
    if props['properties'].get(SLO, None):
        raise Exception("Fast Copy with SLO is unsupported")
    else:
        if ranges:
            raise HTTPInternalServerError(
                request=req, body="no range supported with single object")

    try:
        # TODO check return code (values ?)
        link_meta = storage.object_link(
            from_account, container, obj,
            self.account_name, self.container_name, self.object_name,
            headers=oio_headers, properties=metadata,
            properties_directive='REPLACE', target_version=version,
            cache=oio_cache, perfdata=perfdata)
    # TODO(FVE): this exception catching block has to be refactored
    # TODO check which ones are ok or make non sense
    except exceptions.Conflict:
        raise HTTPConflict(request=req)
    except exceptions.PreconditionFailed:
        raise HTTPPreconditionFailed(request=req)
    except exceptions.SourceReadError:
        req.client_disconnect = True
        self.app.logger.warning(
            _('Client disconnected without sending last chunk'))
        self.app.logger.increment('client_disconnects')
        raise HTTPClientDisconnect(request=req)
    except exceptions.EtagMismatch:
        return HTTPUnprocessableEntity(request=req)
    except (exceptions.ServiceBusy, exceptions.OioTimeout,
            exceptions.DeadlineReached):
        raise
    except (exceptions.NoSuchContainer, exceptions.NotFound):
        raise HTTPNotFound(request=req)
    except exceptions.ClientException as err:
        # 481 = CODE_POLICY_NOT_SATISFIABLE
        if err.status == 481:
            raise exceptions.ServiceBusy()
        self.app.logger.exception(
            _('ERROR Exception transferring data %s'), {'path': req.path})
        raise HTTPInternalServerError(request=req)
    except Exception:
        self.app.logger.exception(
            _('ERROR Exception transferring data %s'), {'path': req.path})
        raise HTTPInternalServerError(request=req)

    return HTTPCreated(request=req, etag=link_meta['hash'])
def get_or_head_response(self, req, x_object_manifest,
                         response_headers=None):
    """Build the GET/HEAD response for a Dynamic Large Object.

    :param req: the swob request for the manifest object
    :param x_object_manifest: value of the X-Object-Manifest header,
        "<container>/<object-prefix>" naming the segment container and
        object-name prefix
    :param response_headers: header (name, value) pairs from the manifest
        object; defaults to ``self._response_headers``
    :returns: a swob Response streaming the concatenated segments (GET),
        or an error response
    """
    if response_headers is None:
        response_headers = self._response_headers

    container, obj_prefix = x_object_manifest.split('/', 1)
    container = unquote(container)
    obj_prefix = unquote(obj_prefix)

    version, account, _junk = req.split_path(2, 3, True)
    error_response, segments = self._get_container_listing(
        req, version, account, container, obj_prefix)
    if error_response:
        return error_response
    # A listing shorter than the per-request limit means we got every
    # segment in one page; otherwise there may be more pages.
    have_complete_listing = len(segments) < \
        constraints.CONTAINER_LISTING_LIMIT

    first_byte = last_byte = None
    actual_content_length = None
    content_length_for_swob_range = None
    # Only single-range requests are honored; multi-range falls through
    # and is served as the whole object.
    if req.range and len(req.range.ranges) == 1:
        content_length_for_swob_range = sum(o['bytes'] for o in segments)

        # This is a hack to handle suffix byte ranges (e.g. "bytes=-5"),
        # which we can't honor unless we have a complete listing.
        _junk, range_end = req.range.ranges_for_length(float("inf"))[0]

        # If this is all the segments, we know whether or not this
        # range request is satisfiable.
        #
        # Alternately, we may not have all the segments, but this range
        # falls entirely within the first page's segments, so we know
        # that it is satisfiable.
        if (have_complete_listing
                or range_end < content_length_for_swob_range):
            byteranges = req.range.ranges_for_length(
                content_length_for_swob_range)
            if not byteranges:
                # Unsatisfiable range: advertise the full length only if
                # we actually know it.
                headers = {'Accept-Ranges': 'bytes'}
                if have_complete_listing:
                    headers['Content-Range'] = 'bytes */%d' % (
                        content_length_for_swob_range, )
                return HTTPRequestedRangeNotSatisfiable(
                    request=req, headers=headers)
            first_byte, last_byte = byteranges[0]
            # For some reason, swob.Range.ranges_for_length adds 1 to the
            # last byte's position.
            last_byte -= 1
            actual_content_length = last_byte - first_byte + 1
        else:
            # The range may or may not be satisfiable, but we can't tell
            # based on just one page of listing, and we're not going to go
            # get more pages because that would use up too many resources,
            # so we ignore the Range header and return the whole object.
            actual_content_length = None
            content_length_for_swob_range = None
            req.range = None

    # Drop stale length headers copied from the manifest object itself.
    response_headers = [(h, v) for h, v in response_headers
                        if h.lower() not in ("content-length",
                                             "content-range")]

    if content_length_for_swob_range is not None:
        # Here, we have to give swob a big-enough content length so that
        # it can compute the actual content length based on the Range
        # header. This value will not be visible to the client; swob will
        # substitute its own Content-Length.
        #
        # Note: if the manifest points to at least CONTAINER_LISTING_LIMIT
        # segments, this may be less than the sum of all the segments'
        # sizes. However, it'll still be greater than the last byte in the
        # Range header, so it's good enough for swob.
        response_headers.append(
            ('Content-Length', str(content_length_for_swob_range)))
    elif have_complete_listing:
        actual_content_length = sum(o['bytes'] for o in segments)
        response_headers.append(
            ('Content-Length', str(actual_content_length)))

    if have_complete_listing:
        # The DLO etag is the md5 of the concatenated segment etags,
        # computable only when every segment is known.
        response_headers = [(h, v) for h, v in response_headers
                            if h.lower() != "etag"]
        etag = md5()
        for seg_dict in segments:
            etag.update(seg_dict['hash'].strip('"'))
        response_headers.append(('Etag', '"%s"' % etag.hexdigest()))

    app_iter = None
    if req.method == 'GET':
        # Rate-limit segment fetches after the configured threshold so a
        # huge DLO cannot monopolize backend requests.
        listing_iter = RateLimitedIterator(
            self._segment_listing_iterator(
                req, version, account, container, obj_prefix, segments,
                first_byte=first_byte, last_byte=last_byte),
            self.dlo.rate_limit_segments_per_sec,
            limit_after=self.dlo.rate_limit_after_segment)

        app_iter = SegmentedIterable(
            req, self.dlo.app, listing_iter,
            ua_suffix="DLO MultipartGET", swift_source="DLO",
            name=req.path, logger=self.logger,
            max_get_time=self.dlo.max_get_time,
            response_body_length=actual_content_length)
        try:
            # Fail fast (409) if the first segment is missing or stale,
            # before any body bytes have been sent.
            app_iter.validate_first_segment()
        except (SegmentError, ListingIterError):
            return HTTPConflict(request=req)

    resp = Response(request=req, headers=response_headers,
                    conditional_response=True, app_iter=app_iter)
    return resp
def PUT(self, request):
    """Handle HTTP PUT requests for the Swift Object Server.

    Validates the request, streams the body to a new diskfile, then
    updates expirer (X-Delete-At) queues and the container listing.

    :param request: swob Request with a storage-policy-aware object path
    :returns: HTTPCreated on success, or an appropriate error response
    """
    device, partition, account, container, obj, policy_idx = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    error_response = check_object_creation(request, obj)
    if error_response:
        return error_response
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past', request=request,
                              content_type='text/plain')
    try:
        # May be None for chunked transfer encoding.
        fsize = request.message_length()
    except ValueError as e:
        return HTTPBadRequest(body=str(e), request=request,
                              content_type='text/plain')
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj,
            policy_idx=policy_idx)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        orig_metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined):
        # No prior version: treat as a brand-new object.
        orig_metadata = {}

    # Checks for If-None-Match
    if request.if_none_match is not None and orig_metadata:
        if '*' in request.if_none_match:
            # File exists already so return 412
            return HTTPPreconditionFailed(request=request)
        if orig_metadata.get('ETag') in request.if_none_match:
            # The current ETag matches, so return 412
            return HTTPPreconditionFailed(request=request)

    # Reject writes older than (or equal to) what is already on disk.
    orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
    if orig_timestamp >= req_timestamp:
        return HTTPConflict(
            request=request,
            headers={'X-Backend-Timestamp': orig_timestamp.internal})
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    upload_expiration = time.time() + self.max_upload_time
    etag = md5()
    elapsed_time = 0
    try:
        with disk_file.create(size=fsize) as writer:
            upload_size = 0

            def timeout_reader():
                # Bound each chunk read so a stalled client cannot hold
                # the connection open indefinitely.
                with ChunkReadTimeout(self.client_timeout):
                    return request.environ['wsgi.input'].read(
                        self.network_chunk_size)

            try:
                for chunk in iter(lambda: timeout_reader(), ''):
                    start_time = time.time()
                    if start_time > upload_expiration:
                        self.logger.increment('PUT.timeouts')
                        return HTTPRequestTimeout(request=request)
                    etag.update(chunk)
                    upload_size = writer.write(chunk)
                    elapsed_time += time.time() - start_time
            except ChunkReadTimeout:
                return HTTPRequestTimeout(request=request)
            if upload_size:
                self.logger.transfer_rate(
                    'PUT.' + device + '.timing', elapsed_time,
                    upload_size)
            # Body shorter/longer than Content-Length means the client
            # disconnected mid-upload.
            if fsize is not None and fsize != upload_size:
                return HTTPClientDisconnect(request=request)
            etag = etag.hexdigest()
            if 'etag' in request.headers and \
                    request.headers['etag'].lower() != etag:
                return HTTPUnprocessableEntity(request=request)
            metadata = {
                'X-Timestamp': request.timestamp.internal,
                'Content-Type': request.headers['content-type'],
                'ETag': etag,
                'Content-Length': str(upload_size),
            }
            # Persist user/system object metadata headers.
            metadata.update(val for val in request.headers.iteritems()
                            if is_sys_or_user_meta('object', val[0]))
            headers_to_copy = (
                request.headers.get(
                    'X-Backend-Replication-Headers', '').split() +
                list(self.allowed_headers))
            for header_key in headers_to_copy:
                if header_key in request.headers:
                    header_caps = header_key.title()
                    metadata[header_caps] = request.headers[header_key]
            # Atomically commit data + metadata (rename into place).
            writer.put(metadata)
    except (DiskFileXattrNotSupported, DiskFileNoSpace):
        return HTTPInsufficientStorage(drive=device, request=request)
    # Keep the expirer's queue in sync with the new X-Delete-At value.
    if orig_delete_at != new_delete_at:
        if new_delete_at:
            self.delete_at_update(
                'PUT', new_delete_at, account, container, obj,
                request, device, policy_idx)
        if orig_delete_at:
            self.delete_at_update(
                'DELETE', orig_delete_at, account, container, obj,
                request, device, policy_idx)
    # Asynchronously tell the container server about the new object.
    self.container_update(
        'PUT', account, container, obj, request,
        HeaderKeyDict({
            'x-size': metadata['Content-Length'],
            'x-content-type': metadata['Content-Type'],
            'x-timestamp': metadata['X-Timestamp'],
            'x-etag': metadata['ETag']}),
        device, policy_idx)
    return HTTPCreated(request=request, etag=etag)
def PUT(self, req):
    """Handle HTTP PUT request.

    Two modes, selected by the path: with an object name, record an
    object row in the container DB (sent by an object server); without
    one, create/update the container DB itself.

    :param req: swob Request
    :returns: HTTPCreated / HTTPAccepted on success, or an error response
    """
    drive, part, account, container, obj = split_and_validate_path(
        req, 4, 5, True)
    if 'x-timestamp' not in req.headers or \
            not check_float(req.headers['x-timestamp']):
        return HTTPBadRequest(body='Missing timestamp', request=req,
                              content_type='text/plain')
    if 'x-container-sync-to' in req.headers:
        err = validate_sync_to(req.headers['x-container-sync-to'],
                               self.allowed_sync_hosts)
        if err:
            return HTTPBadRequest(err)
    if self.mount_check and not check_mount(self.root, drive):
        return HTTPInsufficientStorage(drive=drive, request=req)
    timestamp = normalize_timestamp(req.headers['x-timestamp'])
    broker = self._get_container_broker(drive, part, account, container)
    if obj:     # put container object
        # Auto-create the DB only for reserved accounts (e.g. expirer);
        # otherwise a missing DB is a 404.
        if account.startswith(self.auto_create_account_prefix) and \
                not os.path.exists(broker.db_file):
            try:
                broker.initialize(timestamp)
            except DatabaseAlreadyExists:
                pass
        if not os.path.exists(broker.db_file):
            return HTTPNotFound()
        broker.put_object(obj, timestamp, int(req.headers['x-size']),
                          req.headers['x-content-type'],
                          req.headers['x-etag'])
        return HTTPCreated(request=req)
    else:   # put container
        if not os.path.exists(broker.db_file):
            try:
                broker.initialize(timestamp)
                created = True
            except DatabaseAlreadyExists:
                # Raced with a concurrent PUT that created it first.
                created = False
        else:
            # Re-PUT of an existing DB: it only counts as "created" if
            # it was previously deleted.
            created = broker.is_deleted()
            broker.update_put_timestamp(timestamp)
            if broker.is_deleted():
                # Delete timestamp still wins over our put timestamp.
                return HTTPConflict(request=req)
        metadata = {}
        # Persist sync/save headers and user/system container metadata,
        # each value paired with this request's timestamp.
        metadata.update(
            (key, (value, timestamp))
            for key, value in req.headers.iteritems()
            if key.lower() in self.save_headers or
            is_sys_or_user_meta('container', key))
        if metadata:
            if 'X-Container-Sync-To' in metadata:
                # Changing the sync target resets the sync points so the
                # new target receives a full pass.
                if 'X-Container-Sync-To' not in broker.metadata or \
                        metadata['X-Container-Sync-To'][0] != \
                        broker.metadata['X-Container-Sync-To'][0]:
                    broker.set_x_container_sync_points(-1, -1)
            broker.update_metadata(metadata)
        # Tell the account server about this container.
        resp = self.account_update(req, account, container, broker)
        if resp:
            return resp
        if created:
            return HTTPCreated(request=req)
        else:
            return HTTPAccepted(request=req)
def PUT(self, request):
    """Handle HTTP PUT requests for the Swift Object Server.

    Pre-storage-policy variant: validates the request, streams the body
    to disk, then updates expirer queues and the container listing.

    :param request: swob Request
    :returns: HTTPCreated on success, or an appropriate error response
    """
    device, partition, account, container, obj = \
        split_and_validate_path(request, 5, 5, True)
    if 'x-timestamp' not in request.headers or \
            not check_float(request.headers['x-timestamp']):
        return HTTPBadRequest(body='Missing timestamp', request=request,
                              content_type='text/plain')
    error_response = check_object_creation(request, obj)
    if error_response:
        return error_response
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past', request=request,
                              content_type='text/plain')
    try:
        # May be None for chunked transfer encoding.
        fsize = request.message_length()
    except ValueError as e:
        return HTTPBadRequest(body=str(e), request=request,
                              content_type='text/plain')
    try:
        disk_file = self._diskfile(device, partition, account, container,
                                   obj)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    old_delete_at = int(disk_file.metadata.get('X-Delete-At') or 0)
    orig_timestamp = disk_file.metadata.get('X-Timestamp')
    # Normalized timestamps compare correctly as strings; reject writes
    # not newer than what is already on disk.
    if orig_timestamp and orig_timestamp >= request.headers['x-timestamp']:
        return HTTPConflict(request=request)
    upload_expiration = time.time() + self.max_upload_time
    etag = md5()
    elapsed_time = 0
    try:
        with disk_file.create(size=fsize) as writer:
            reader = request.environ['wsgi.input'].read
            for chunk in iter(lambda: reader(self.network_chunk_size),
                              ''):
                start_time = time.time()
                if start_time > upload_expiration:
                    self.logger.increment('PUT.timeouts')
                    return HTTPRequestTimeout(request=request)
                etag.update(chunk)
                writer.write(chunk)
                # Yield to other greenthreads between chunks.
                sleep()
                elapsed_time += time.time() - start_time
            upload_size = writer.upload_size
            if upload_size:
                self.logger.transfer_rate(
                    'PUT.' + device + '.timing', elapsed_time,
                    upload_size)
            # Size mismatch means the client disconnected mid-upload.
            if fsize is not None and fsize != upload_size:
                return HTTPClientDisconnect(request=request)
            etag = etag.hexdigest()
            if 'etag' in request.headers and \
                    request.headers['etag'].lower() != etag:
                return HTTPUnprocessableEntity(request=request)
            metadata = {
                'X-Timestamp': request.headers['x-timestamp'],
                'Content-Type': request.headers['content-type'],
                'ETag': etag,
                'Content-Length': str(upload_size),
            }
            # Keep user metadata headers ("x-object-meta-<name>"); the
            # length check rejects an empty <name>.
            metadata.update(
                val for val in request.headers.iteritems()
                if val[0].lower().startswith('x-object-meta-') and
                len(val[0]) > 14)
            for header_key in self.allowed_headers:
                if header_key in request.headers:
                    header_caps = header_key.title()
                    metadata[header_caps] = request.headers[header_key]
            # Atomically commit data + metadata (rename into place).
            writer.put(metadata)
    except DiskFileNoSpace:
        return HTTPInsufficientStorage(drive=device, request=request)
    # Keep the expirer's queue in sync with the new X-Delete-At value.
    if old_delete_at != new_delete_at:
        if new_delete_at:
            self.delete_at_update(
                'PUT', new_delete_at, account, container, obj, request,
                device)
        if old_delete_at:
            self.delete_at_update(
                'DELETE', old_delete_at, account, container, obj, request,
                device)
    # Only notify the container server if this write superseded the
    # previous version.
    if not orig_timestamp or \
            orig_timestamp < request.headers['x-timestamp']:
        self.container_update(
            'PUT', account, container, obj, request,
            HeaderKeyDict({
                'x-size': disk_file.metadata['Content-Length'],
                'x-content-type': disk_file.metadata['Content-Type'],
                'x-timestamp': disk_file.metadata['X-Timestamp'],
                'x-etag': disk_file.metadata['ETag']}),
            device)
    resp = HTTPCreated(request=request, etag=etag)
    return resp
else: return HTTPCreated(request=req) else: # put account timestamp = normalize_timestamp(req.headers['x-timestamp']) if not os.path.exists(broker.db_file): broker.initialize(timestamp) created = True elif broker.is_status_deleted(): self.logger.timing_since('PUT.timing', start_time) return HTTPForbidden(request=req, body='Recently deleted') else: created = broker.is_deleted() broker.update_put_timestamp(timestamp) if broker.is_deleted(): self.logger.increment('PUT.errors') return HTTPConflict(request=req) metadata = {} metadata.update((key, (value, timestamp)) for key, value in req.headers.iteritems() if key.lower().startswith('x-account-meta-')) if metadata: broker.update_metadata(metadata) self.logger.timing_since('PUT.timing', start_time) if created: return HTTPCreated(request=req) else: return HTTPAccepted(request=req) @public def HEAD(self, req): """Handle HTTP HEAD request."""
def PUT(self, req):
    """Handle HTTP PUT request.

    Two modes, selected by the path: with a container name, record a
    container row in the account DB (sent by a container server);
    without one, create/update the account DB itself.

    :param req: swob Request
    :returns: HTTPCreated / HTTPAccepted / HTTPNoContent on success, or
        an error response
    """
    drive, part, account, container = split_and_validate_path(req, 3, 4)
    try:
        check_drive(self.root, drive, self.mount_check)
    except ValueError:
        return HTTPInsufficientStorage(drive=drive, request=req)
    if not self.check_free_space(drive):
        return HTTPInsufficientStorage(drive=drive, request=req)
    if container:   # put account container
        # Container-server updates may omit x-timestamp; fall back to
        # "now" rather than rejecting the update.
        if 'x-timestamp' not in req.headers:
            timestamp = Timestamp.now()
        else:
            timestamp = valid_timestamp(req)
        pending_timeout = None
        container_policy_index = \
            req.headers.get('X-Backend-Storage-Policy-Index', 0)
        if 'x-trans-id' in req.headers:
            # Real client-driven update: don't block long on the
            # pending file.
            pending_timeout = 3
        broker = self._get_account_broker(drive, part, account,
                                          pending_timeout=pending_timeout)
        # Auto-create the DB only for reserved accounts (e.g. expirer).
        if account.startswith(self.auto_create_account_prefix) and \
                not os.path.exists(broker.db_file):
            try:
                broker.initialize(timestamp.internal)
            except DatabaseAlreadyExists:
                pass
        if req.headers.get('x-account-override-deleted', 'no').lower() != \
                'yes' and broker.is_deleted():
            return HTTPNotFound(request=req)
        broker.put_container(container, req.headers['x-put-timestamp'],
                             req.headers['x-delete-timestamp'],
                             req.headers['x-object-count'],
                             req.headers['x-bytes-used'],
                             container_policy_index)
        # Normalized timestamps compare correctly as strings; a newer
        # delete timestamp means the container row is a tombstone.
        if req.headers['x-delete-timestamp'] > \
                req.headers['x-put-timestamp']:
            return HTTPNoContent(request=req)
        else:
            return HTTPCreated(request=req)
    else:   # put account
        timestamp = valid_timestamp(req)
        broker = self._get_account_broker(drive, part, account)
        if not os.path.exists(broker.db_file):
            try:
                broker.initialize(timestamp.internal)
                created = True
            except DatabaseAlreadyExists:
                # Raced with a concurrent PUT that created it first.
                created = False
        elif broker.is_status_deleted():
            return self._deleted_response(broker, req, HTTPForbidden,
                                          body='Recently deleted')
        else:
            # Re-PUT of an existing DB: it only counts as "created" if
            # it was previously deleted.
            created = broker.is_deleted()
            broker.update_put_timestamp(timestamp.internal)
            if broker.is_deleted():
                # Delete timestamp still wins over our put timestamp.
                return HTTPConflict(request=req)
        self._update_metadata(req, broker, timestamp)
        if created:
            return HTTPCreated(request=req)
        else:
            return HTTPAccepted(request=req)