def test_get_name_and_placement_object_replication(self):
    # yup, suffixes are sent '-'.joined in the path
    cases = (
        ('/device/part/012-345-678-9ab-cde', 0,
         '012-345-678-9ab-cde', EC_POLICY),
        ('/device/part', 1, None, REPL_POLICY),   # no suffixes: false-y
        ('/device/part/', 1, '', REPL_POLICY),    # trailing slash: false-y
    )
    for path, index, expected_suffixes, expected_type in cases:
        req = Request.blank(path, headers={
            'X-Backend-Storage-Policy-Index': str(index)})
        device, partition, suffix_parts, policy = \
            rh.get_name_and_placement(req, 2, 3, True)
        self.assertEqual(device, 'device')
        self.assertEqual(partition, 'part')
        self.assertEqual(suffix_parts, expected_suffixes)
        self.assertEqual(policy, POLICIES[index])
        self.assertEqual(policy.policy_type, expected_type)
def test_get_name_and_placement_object_req(self):
    """Object paths resolve to all five components plus a policy."""
    path = '/device/part/account/container/object'
    req = Request.blank(path, headers={
        'X-Backend-Storage-Policy-Index': '0'})
    for index, expected_type in enumerate((EC_POLICY, REPL_POLICY)):
        req.headers['X-Backend-Storage-Policy-Index'] = index
        device, part, account, container, obj, policy = \
            get_name_and_placement(req, 5, 5, True)
        self.assertEqual(
            (device, part, account, container, obj),
            ('device', 'part', 'account', 'container', 'object'))
        self.assertEqual(policy, POLICIES[index])
        self.assertEqual(policy.policy_type, expected_type)
    # an unknown policy index yields a 503
    req.headers['X-Backend-Storage-Policy-Index'] = 'foo'
    with self.assertRaises(HTTPException) as raised:
        device, part, account, container, obj, policy = \
            get_name_and_placement(req, 5, 5, True)
    e = raised.exception
    self.assertEqual(e.status_int, 503)
    self.assertEqual(str(e), '503 Service Unavailable')
    self.assertEqual(e.body, b"No policy with index foo")
def test_get_name_and_placement_object_req(self):
    """Full object path parsing, for each policy type in turn."""
    path = '/device/part/account/container/object'
    req = Request.blank(path, headers={
        'X-Backend-Storage-Policy-Index': '0'})
    expectations = ((0, EC_POLICY), (1, REPL_POLICY))
    for index, expected_type in expectations:
        req.headers['X-Backend-Storage-Policy-Index'] = index
        device, part, account, container, obj, policy = \
            rh.get_name_and_placement(req, 5, 5, True)
        self.assertEqual(device, 'device')
        self.assertEqual(part, 'part')
        self.assertEqual(account, 'account')
        self.assertEqual(container, 'container')
        self.assertEqual(obj, 'object')
        self.assertEqual(policy, POLICIES[index])
        self.assertEqual(policy.policy_type, expected_type)
    # a bogus policy index must surface as a 503
    req.headers['X-Backend-Storage-Policy-Index'] = 'foo'
    with self.assertRaises(HTTPException) as raised:
        device, part, account, container, obj, policy = \
            rh.get_name_and_placement(req, 5, 5, True)
    e = raised.exception
    self.assertEqual(e.status_int, 503)
    self.assertEqual(str(e), '503 Service Unavailable')
    self.assertEqual(e.body, b"No policy with index foo")
def test_get_name_and_placement_object_replication(self):
    # yup, suffixes are sent '-'.joined in the path
    for path, index, expected_suffixes, expected_type in (
            ('/device/part/012-345-678-9ab-cde', 0,
             '012-345-678-9ab-cde', EC_POLICY),
            ('/device/part', 1, None, REPL_POLICY),     # false-y
            ('/device/part/', 1, '', REPL_POLICY)):     # still false-y
        req = Request.blank(path, headers={
            'X-Backend-Storage-Policy-Index': str(index)})
        device, partition, suffix_parts, policy = \
            get_name_and_placement(req, 2, 3, True)
        self.assertEqual(device, 'device')
        self.assertEqual(partition, 'part')
        self.assertEqual(suffix_parts, expected_suffixes)
        self.assertEqual(policy, POLICIES[index])
        self.assertEqual(policy.policy_type, expected_type)
def DELETE(self, request):
    """Handle HTTP DELETE requests for the Swift Object Server."""
    device, partition, account, container, obj, policy_idx = \
        get_name_and_placement(request, 5, 5, True)
    # The delete timestamp is mandatory and must parse as a float.
    if 'x-timestamp' not in request.headers or \
            not check_float(request.headers['x-timestamp']):
        return HTTPBadRequest(body='Missing timestamp', request=request,
                              content_type='text/plain')
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj,
            policy_idx=policy_idx)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    # Work out what on-disk state we are superseding; the response
    # class is chosen now but only returned at the very end, after
    # expirer bookkeeping and the actual delete have happened.
    try:
        orig_metadata = disk_file.read_metadata()
    except DiskFileExpired as e:
        # Expired objects still expose their metadata for the
        # X-Delete-At bookkeeping below.
        orig_timestamp = e.timestamp
        orig_metadata = e.metadata
        response_class = HTTPNotFound
    except DiskFileDeleted as e:
        orig_timestamp = e.timestamp
        orig_metadata = {}
        response_class = HTTPNotFound
    except (DiskFileNotExist, DiskFileQuarantined):
        orig_timestamp = 0
        orig_metadata = {}
        response_class = HTTPNotFound
    else:
        orig_timestamp = orig_metadata.get('X-Timestamp', 0)
        # NOTE(review): this compares the raw header string against
        # either a string or the int 0 — legal only under Python 2's
        # mixed-type ordering, and it relies on Swift timestamps
        # sorting lexicographically. Confirm before porting to py3.
        if orig_timestamp < request.headers['x-timestamp']:
            response_class = HTTPNoContent
        else:
            response_class = HTTPConflict
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    # X-If-Delete-At (sent by the object expirer) must match the
    # stored X-Delete-At exactly, else the delete is refused.
    try:
        req_if_delete_at_val = request.headers['x-if-delete-at']
        req_if_delete_at = int(req_if_delete_at_val)
    except KeyError:
        pass
    except ValueError:
        return HTTPBadRequest(
            request=request, body='Bad X-If-Delete-At header value')
    else:
        if orig_delete_at != req_if_delete_at:
            return HTTPPreconditionFailed(
                request=request,
                body='X-If-Delete-At and X-Delete-At do not match')
    if orig_delete_at:
        # Remove the pending expiration entry for this object.
        self.delete_at_update('DELETE', orig_delete_at, account,
                              container, obj, request, device)
    req_timestamp = request.headers['X-Timestamp']
    # Only actually delete (and notify the container) when the request
    # is newer than what is on disk; the pre-selected response_class
    # is returned either way.
    if orig_timestamp < req_timestamp:
        disk_file.delete(req_timestamp)
        self.container_update(
            'DELETE', account, container, obj, request,
            HeaderKeyDict({'x-timestamp': req_timestamp}), device,
            policy_idx)
    return response_class(request=request)
def DELETE(self, request):
    """Handle HTTP DELETE requests for the Swift Object Server.

    Deletes the object from the backing cloud store named by the
    X-Storage-Container header, then notifies the container server.
    """
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    response_timestamp = req_timestamp
    # Validate X-If-Delete-At when present.  NOTE(review): the original
    # comparison against on-disk X-Delete-At metadata was commented out
    # upstream, so only the header's format is checked here; the parsed
    # value is intentionally unused.
    try:
        req_if_delete_at_val = request.headers['x-if-delete-at']
        req_if_delete_at = int(req_if_delete_at_val)
    except KeyError:
        pass
    except ValueError:
        return HTTPBadRequest(
            request=request, body='Bad X-If-Delete-At header value')
    # The backing-store location arrives as "<account>/<container>".
    # BUG FIX: previously a missing or malformed header crashed with
    # AttributeError/IndexError (a 500); reject it cleanly instead.
    cloud_container_info = request.headers.get('X-Storage-Container')
    if not cloud_container_info:
        return HTTPBadRequest(
            request=request, body='Missing X-Storage-Container header')
    cloud_container = cloud_container_info.strip().split('/')
    if len(cloud_container) < 2:
        return HTTPBadRequest(
            request=request, body='Bad X-Storage-Container header value')
    if not delete_file(cloud_container[0], cloud_container[1], obj):
        return HTTPNotFound(request=request)
    self.container_update(
        'DELETE', account, container, obj, request,
        HeaderKeyDict({'x-timestamp': req_timestamp.internal}),
        device, policy)
    return HTTPNoContent(
        request=request,
        headers={'X-Backend-Timestamp': response_timestamp.internal})
def _sof_container_update(self, request, resp):
    """
    SOF specific metadata is set in DiskFile.open()._filter_metadata()
    This method internally invokes Swift's container_update() method.
    """
    device, partition, account, container, obj, policy_idx = \
        get_name_and_placement(request, 5, 5, True)
    # container_update() needs container-specific headers.  The proxy
    # object controller appends them for PUT backend requests but not
    # for HEAD/GET, so populate the required information here before
    # invoking container_update().
    container_partition, container_nodes = \
        self.get_container_ring().get_nodes(account, container)
    request.headers['X-Container-Partition'] = container_partition
    for container_node in container_nodes:
        host = '%(ip)s:%(port)s' % container_node
        request.headers['X-Container-Host'] = csv_append(
            request.headers.get('X-Container-Host'), host)
        request.headers['X-Container-Device'] = csv_append(
            request.headers.get('X-Container-Device'),
            container_node['device'])
    update_headers = HeaderKeyDict({
        'x-size': resp.headers['Content-Length'],
        'x-content-type': resp.headers['Content-Type'],
        'x-timestamp': resp.headers['X-Timestamp'],
        'x-etag': resp.headers['ETag']})
    self.container_update('PUT', account, container, obj, request,
                          update_headers, device, policy_idx)
def HEAD(self, request):
    """Handle HTTP HEAD requests for the Swift Object Server."""
    device, partition, account, container, obj, policy_idx = \
        get_name_and_placement(request, 5, 5, True)
    try:
        disk_file = self.get_diskfile(device, partition, account,
                                      container, obj,
                                      policy_idx=policy_idx)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined) as err:
        not_found_headers = {}
        if hasattr(err, "timestamp"):
            not_found_headers["X-Backend-Timestamp"] = \
                err.timestamp.internal
        return HTTPNotFound(request=request, headers=not_found_headers,
                            conditional_response=True)
    response = Response(request=request, conditional_response=True)
    response.headers["Content-Type"] = metadata.get(
        "Content-Type", "application/octet-stream")
    # Copy through user/system object metadata plus whitelisted headers.
    for key, value in metadata.iteritems():
        if is_sys_or_user_meta("object", key) or \
                key.lower() in self.allowed_headers:
            response.headers[key] = value
    response.etag = metadata["ETag"]
    ts = Timestamp(metadata["X-Timestamp"])
    response.last_modified = math.ceil(float(ts))
    # Needed for container sync feature
    response.headers["X-Timestamp"] = ts.normal
    response.headers["X-Backend-Timestamp"] = ts.internal
    response.content_length = int(metadata["Content-Length"])
    # Content-Encoding is optional metadata.
    if "Content-Encoding" in metadata:
        response.content_encoding = metadata["Content-Encoding"]
    return response
def GET(self, request):
    """Handle HTTP GET requests for the Swift Object Server.

    Streams the object body from the backing cloud store named by the
    X-Storage-Container header ("<account>/<container>").
    """
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    # NOTE(review): keep_cache is computed but never used here —
    # presumably kept for parity with the on-disk GET path.
    keep_cache = self.keep_cache_private or (
        'X-Auth-Token' not in request.headers and
        'X-Storage-Token' not in request.headers)
    # BUG FIX: a missing X-Storage-Container header used to crash with
    # AttributeError (a 500); treat it as object-not-found instead.
    cloud_container_info = request.headers.get('X-Storage-Container')
    if not cloud_container_info:
        return HTTPNotFound(request=request)
    cloud_container = cloud_container_info.strip().split('/')
    obj_iter = get_file(cloud_container[0], cloud_container[1], obj)
    if obj_iter is None:  # idiom fix: was `None == obj_iter`
        return HTTPNotFound(request=request)
    # NOTE(review): an empty md5 object (not a hex digest string) is
    # passed as conditional_etag, matching the original behavior —
    # confirm this is intentional.
    conditional_etag = md5()
    response = Response(
        app_iter=obj_iter, request=request, conditional_response=True,
        conditional_etag=conditional_etag)
    # The backing store does not supply metadata, so serve a generic
    # content type.
    response.headers['Content-Type'] = 'application/octet-stream'
    return request.get_response(response)
def initialize_request(self):
    """
    Basic validation of request and mount check.

    This function will be called before attempting to acquire a
    replication semaphore lock, so contains only quick checks.
    """
    # This environ override has been supported since eventlet 0.14:
    # https://bitbucket.org/eventlet/eventlet/commits/ \
    # 4bd654205a4217970a57a7c4802fed7ff2c8b770
    self.request.environ['eventlet.minimum_write_chunk_size'] = 0
    self.device, self.partition, self.policy = \
        request_helpers.get_name_and_placement(self.request, 2, 2, False)

    def header_int(name):
        # Missing/empty header -> None, otherwise its integer value.
        value = self.request.headers.get(name)
        return int(value) if value else None

    self.frag_index = header_int('X-Backend-Ssync-Frag-Index')
    self.node_index = header_int('X-Backend-Ssync-Node-Index')
    if self.node_index != self.frag_index:
        # a primary node should only receive its own fragments
        raise swob.HTTPBadRequest(
            'Frag-Index (%s) != Node-Index (%s)' % (
                self.frag_index, self.node_index))
    utils.validate_device_partition(self.device, self.partition)
    self.diskfile_mgr = self.app._diskfile_router[self.policy]
    if not self.diskfile_mgr.get_dev_path(self.device):
        raise swob.HTTPInsufficientStorage(drive=self.device)
    self.fp = self.request.environ['wsgi.input']
def initialize_request(self): """ Basic validation of request and mount check. This function will be called before attempting to acquire a replication semaphore lock, so contains only quick checks. """ # This environ override has been supported since eventlet 0.14: # https://bitbucket.org/eventlet/eventlet/commits/ \ # 4bd654205a4217970a57a7c4802fed7ff2c8b770 self.request.environ['eventlet.minimum_write_chunk_size'] = 0 self.device, self.partition, self.policy = \ request_helpers.get_name_and_placement(self.request, 2, 2, False) self.frag_index = self.node_index = None if self.request.headers.get('X-Backend-Ssync-Frag-Index'): self.frag_index = int( self.request.headers['X-Backend-Ssync-Frag-Index']) if self.request.headers.get('X-Backend-Ssync-Node-Index'): self.node_index = int( self.request.headers['X-Backend-Ssync-Node-Index']) if self.node_index != self.frag_index: # a primary node should only recieve it's own fragments raise swob.HTTPBadRequest( 'Frag-Index (%s) != Node-Index (%s)' % ( self.frag_index, self.node_index)) utils.validate_device_partition(self.device, self.partition) self.diskfile_mgr = self.app._diskfile_router[self.policy] if not self.diskfile_mgr.get_dev_path(self.device): raise swob.HTTPInsufficientStorage(drive=self.device) self.fp = self.request.environ['wsgi.input']
def GET(self, request):
    """Handle HTTP GET requests for the Swift Object Server."""
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    # Cache is allowed for private deployments or unauthenticated
    # requests.
    keep_cache = self.keep_cache_private or (
        'X-Auth-Token' not in request.headers and
        'X-Storage-Token' not in request.headers)
    try:
        disk_file = self.get_diskfile(device, partition, account,
                                      container, obj, policy=policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        with disk_file.open():
            metadata = disk_file.get_metadata()
            obj_size = int(metadata['Content-Length'])
            file_x_ts = Timestamp(metadata['X-Timestamp'])
            # NOTE(review): keep_cache is recomputed here with the same
            # expression as above; the first assignment is redundant.
            keep_cache = (self.keep_cache_private or
                          ('X-Auth-Token' not in request.headers and
                           'X-Storage-Token' not in request.headers))
            conditional_etag = None
            # A caller may redirect conditional-request ETag matching
            # at an alternate metadata key.
            if 'X-Backend-Etag-Is-At' in request.headers:
                conditional_etag = metadata.get(
                    request.headers['X-Backend-Etag-Is-At'])
            response = Response(
                app_iter=disk_file.reader(keep_cache=keep_cache),
                request=request, conditional_response=True,
                conditional_etag=conditional_etag)
            response.headers['Content-Type'] = metadata.get(
                'Content-Type', 'application/octet-stream')
            # Copy through object user/system metadata and any
            # explicitly allowed headers.
            for key, value in metadata.iteritems():
                if is_sys_or_user_meta('object', key) or \
                        key.lower() in self.allowed_headers:
                    response.headers[key] = value
            response.etag = metadata['ETag']
            response.last_modified = math.ceil(float(file_x_ts))
            response.content_length = obj_size
            try:
                response.content_encoding = metadata['Content-Encoding']
            except KeyError:
                pass
            response.headers['X-Timestamp'] = file_x_ts.normal
            response.headers['X-Backend-Timestamp'] = file_x_ts.internal
            # get_response() applies conditional-request processing.
            resp = request.get_response(response)
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined) as e:
        headers = {}
        if hasattr(e, 'timestamp'):
            headers['X-Backend-Timestamp'] = e.timestamp.internal
        resp = HTTPNotFound(request=request, headers=headers,
                            conditional_response=True)
    return resp
def GET(self, request):
    """Handle HTTP GET requests for the Swift Object Server."""
    # BUG FIX: removed leftover debug `print` statements that wrote the
    # request, placement tuple, diskfile and response to stdout on
    # every GET.
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    # Cache is allowed for private deployments or unauthenticated
    # requests.
    keep_cache = self.keep_cache_private or (
        'X-Auth-Token' not in request.headers and
        'X-Storage-Token' not in request.headers)
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj, policy=policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        with disk_file.open():
            metadata = disk_file.get_metadata()
            obj_size = int(metadata['Content-Length'])
            file_x_ts = Timestamp(metadata['X-Timestamp'])
            keep_cache = (self.keep_cache_private or
                          ('X-Auth-Token' not in request.headers and
                           'X-Storage-Token' not in request.headers))
            conditional_etag = None
            # A caller may redirect conditional-request ETag matching
            # at an alternate metadata key.
            if 'X-Backend-Etag-Is-At' in request.headers:
                conditional_etag = metadata.get(
                    request.headers['X-Backend-Etag-Is-At'])
            response = Response(
                app_iter=disk_file.reader(keep_cache=keep_cache),
                request=request, conditional_response=True,
                conditional_etag=conditional_etag)
            response.headers['Content-Type'] = metadata.get(
                'Content-Type', 'application/octet-stream')
            for key, value in metadata.iteritems():
                if is_sys_or_user_meta('object', key) or \
                        key.lower() in self.allowed_headers:
                    response.headers[key] = value
            response.etag = metadata['ETag']
            response.last_modified = math.ceil(float(file_x_ts))
            response.content_length = obj_size
            try:
                response.content_encoding = metadata['Content-Encoding']
            except KeyError:
                pass
            response.headers['X-Timestamp'] = file_x_ts.normal
            response.headers['X-Backend-Timestamp'] = file_x_ts.internal
            resp = request.get_response(response)
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined) as e:
        headers = {}
        if hasattr(e, 'timestamp'):
            headers['X-Backend-Timestamp'] = e.timestamp.internal
        resp = HTTPNotFound(request=request, headers=headers,
                            conditional_response=True)
    return resp
def HEAD(self, request):
    """Handle HTTP HEAD requests for the Swift on File object server"""
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    # Get DiskFile
    try:
        disk_file = self.get_diskfile(device, partition, account,
                                      container, obj, policy=policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    # Read DiskFile metadata
    try:
        disk_file.open()
        metadata = disk_file.get_metadata()
    except (DiskFileNotExist, DiskFileQuarantined) as e:
        headers = {}
        if hasattr(e, 'timestamp'):
            headers['X-Backend-Timestamp'] = e.timestamp.internal
        # BUG FIX: keyword was misspelled 'conditional_respose', which
        # silently failed to enable conditional responses on the 404.
        return HTTPNotFound(request=request, headers=headers,
                            conditional_response=True)
    # Create and populate our response
    response = Response(request=request, conditional_response=True)
    response.headers['Content-Type'] = \
        metadata.get('Content-Type', 'application/octet-stream')
    for key, value in metadata.iteritems():
        if is_sys_or_user_meta('object', key) or key.lower() in \
                self.allowed_headers:
            response.headers[key] = value
    response.etag = metadata['ETag']
    ts = Timestamp(metadata['X-Timestamp'])
    response.last_modified = math.ceil(float(ts))
    # Needed for container sync feature
    response.headers['X-Timestamp'] = ts.normal
    response.headers['X-Backend-Timestamp'] = ts.internal
    response.content_length = int(metadata['Content-Length'])
    try:
        response.content_encoding = metadata['Content-Encoding']
    except KeyError:
        pass
    # (HPSS) Inject HPSS xattr metadata into headers
    want_hpss_metadata = request.headers.get('X-HPSS-Get-Metadata',
                                             False)
    if config_true_value(want_hpss_metadata):
        try:
            hpss_headers = disk_file.read_hpss_system_metadata()
            response.headers.update(hpss_headers)
        except SwiftOnFileSystemIOError:
            return HTTPServiceUnavailable(request=request)
    if 'X-Object-Sysmeta-Update-Container' in response.headers:
        self._sof_container_update(request, response)
        response.headers.pop('X-Object-Sysmeta-Update-Container')
    return response
def POST(self, request):
    """Handle HTTP POST requests for the Swift on File object server"""
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    # A new expiration time in the past is a client error.
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past',
                              request=request,
                              content_type='text/plain')
    # Get DiskFile
    try:
        disk_file = self.get_diskfile(device, partition, account,
                                      container, obj, policy,
                                      uid=int(self.hpss_uid),
                                      gid=int(self.hpss_gid))
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    # Set class of service if we got it
    new_cos = request.headers.get('X-HPSS-Class-Of-Service-Id', None)
    if new_cos:
        disk_file.set_cos(int(new_cos))
    # Set purge lock status if we got it
    if self.allow_purgelock:
        purge_lock = request.headers.get('X-HPSS-Purgelock-Status',
                                         None)
        if purge_lock is not None:
            disk_file.set_purge_lock(purge_lock)
    # Update metadata from request
    try:
        orig_metadata = disk_file.read_metadata()
    except (DiskFileNotExist, DiskFileQuarantined):
        return HTTPNotFound(request=request)
    orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
    # A request no newer than the on-disk object is a conflict.
    if orig_timestamp >= req_timestamp:
        backend_headers = {
            'X-Backend-Timestamp': orig_timestamp.internal}
        return HTTPConflict(request=request, headers=backend_headers)
    metadata = {'X-Timestamp': req_timestamp.internal}
    # Carry over user metadata plus any explicitly allowed headers.
    metadata.update(val for val in request.headers.iteritems()
                    if is_user_meta('object', val[0]))
    for header_key in self.allowed_headers:
        if header_key in request.headers:
            header_caps = header_key.title()
            metadata[header_caps] = request.headers[header_key]
    # Keep the expirer bookkeeping in sync with the new X-Delete-At.
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    if orig_delete_at != new_delete_at:
        if new_delete_at:
            self.delete_at_update('PUT', new_delete_at, account,
                                  container, obj, request, device,
                                  policy)
        if orig_delete_at:
            self.delete_at_update('DELETE', orig_delete_at, account,
                                  container, obj, request, device,
                                  policy)
    disk_file.write_metadata(metadata)
    return HTTPAccepted(request=request)
def POST(self, request):
    """Handle HTTP POST requests for the Swift Object Server."""
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past',
                              request=request,
                              content_type='text/plain')
    try:
        # Get the disk file manager instance
        disk_file = self.get_diskfile(
            device, partition, account, container, obj,
            policy=policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        # Read the original (on-disk) metadata
        orig_metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    # The object file does not exist: report the error
    except (DiskFileNotExist, DiskFileQuarantined):
        return HTTPNotFound(request=request)
    orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
    if orig_timestamp >= req_timestamp:
        return HTTPConflict(
            request=request,
            headers={'X-Backend-Timestamp': orig_timestamp.internal})
    metadata = {'X-Timestamp': req_timestamp.internal}
    # Preserve the large-object (SLO) manifest across the POST
    # (original comment questioned why — presumably so a metadata-only
    # update does not destroy the manifest; confirm against
    # _preserve_slo_manifest).
    self._preserve_slo_manifest(metadata, orig_metadata)
    # Copy user-defined metadata from the request into the dict
    metadata.update(val for val in request.headers.items()
                    if is_user_meta('object', val[0]))
    for header_key in self.allowed_headers:
        if header_key in request.headers:
            header_caps = header_key.title()
            # Copy system metadata that is allowed to change
            metadata[header_caps] = request.headers[header_key]
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    if orig_delete_at != new_delete_at:
        if new_delete_at:
            self.delete_at_update('PUT', new_delete_at, account,
                                  container, obj, request, device,
                                  policy)
        if orig_delete_at:
            self.delete_at_update('DELETE', orig_delete_at, account,
                                  container, obj, request, device,
                                  policy)
    try:
        # Write the updated metadata to the object's extended
        # attributes
        disk_file.write_metadata(metadata)
    except (DiskFileXattrNotSupported, DiskFileNoSpace):
        return HTTPInsufficientStorage(drive=device, request=request)
    return HTTPAccepted(request=request)
def POST(self, request):
    """Handle HTTP POST requests for the Swift Object Server."""
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    # A new expiration time in the past is a client error.
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past',
                              request=request,
                              content_type='text/plain')
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj,
            policy=policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        orig_metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined):
        return HTTPNotFound(request=request)
    orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
    # Requests no newer than the on-disk object are conflicts.
    if orig_timestamp >= req_timestamp:
        return HTTPConflict(
            request=request,
            headers={'X-Backend-Timestamp': orig_timestamp.internal})
    metadata = {'X-Timestamp': req_timestamp.internal}
    # Keep any SLO manifest alive across a metadata-only update.
    self._preserve_slo_manifest(metadata, orig_metadata)
    # Copy through user metadata.
    metadata.update(val for val in request.headers.items()
                    if is_user_meta('object', val[0]))
    # Replication may ask for additional headers to be persisted,
    # beyond the normally allowed set.
    headers_to_copy = (
        request.headers.get(
            'X-Backend-Replication-Headers', '').split() +
        list(self.allowed_headers))
    for header_key in headers_to_copy:
        if header_key in request.headers:
            header_caps = header_key.title()
            metadata[header_caps] = request.headers[header_key]
    # Keep expirer bookkeeping in sync with the new X-Delete-At.
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    if orig_delete_at != new_delete_at:
        if new_delete_at:
            self.delete_at_update('PUT', new_delete_at, account,
                                  container, obj, request, device,
                                  policy)
        if orig_delete_at:
            self.delete_at_update('DELETE', orig_delete_at, account,
                                  container, obj, request, device,
                                  policy)
    try:
        disk_file.write_metadata(metadata)
    except (DiskFileXattrNotSupported, DiskFileNoSpace):
        return HTTPInsufficientStorage(drive=device, request=request)
    return HTTPAccepted(request=request)
def REPLICATE(self, request):
    """
    Handle REPLICATE requests for the Swift Object Server.  This is
    used by the object replicator to get hashes for directories.
    """
    device, partition, suffix, policy_idx = \
        get_name_and_placement(request, 2, 3, True)
    try:
        suffix_hashes = self._diskfile_mgr.get_hashes(
            device, partition, suffix, policy_idx)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    # The replicator expects the hashes pickled in the response body.
    return Response(body=pickle.dumps(suffix_hashes))
def HEAD(self, request):
    """Handle HTTP HEAD requests for the Swift Object Server."""
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    try:
        disk_file = self.get_diskfile(device, partition, account,
                                      container, obj, policy=policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined) as err:
        not_found_headers = {}
        if hasattr(err, 'timestamp'):
            not_found_headers['X-Backend-Timestamp'] = \
                err.timestamp.internal
        return HTTPNotFound(request=request, headers=not_found_headers,
                            conditional_response=True)
    # A caller may redirect conditional-request ETag matching at an
    # alternate metadata key.
    etag_at = request.headers.get('X-Backend-Etag-Is-At')
    conditional_etag = metadata.get(etag_at) if etag_at else None
    response = Response(request=request, conditional_response=True,
                        conditional_etag=conditional_etag)
    response.headers['Content-Type'] = metadata.get(
        'Content-Type', 'application/octet-stream')
    # Copy through user/system object metadata plus allowed headers.
    for key, value in metadata.iteritems():
        if is_sys_or_user_meta('object', key) or \
                key.lower() in self.allowed_headers:
            response.headers[key] = value
    response.etag = metadata['ETag']
    ts = Timestamp(metadata['X-Timestamp'])
    response.last_modified = math.ceil(float(ts))
    # Needed for container sync feature
    response.headers['X-Timestamp'] = ts.normal
    response.headers['X-Backend-Timestamp'] = ts.internal
    response.content_length = int(metadata['Content-Length'])
    # Content-Encoding is optional metadata.
    if 'Content-Encoding' in metadata:
        response.content_encoding = metadata['Content-Encoding']
    return response
def REPLICATE(self, request):
    """
    Handle REPLICATE requests for the Swift Object Server.  This is
    used by the object replicator to get hashes for directories.
    """
    device, partition, suffix, policy_idx = \
        get_name_and_placement(request, 2, 3, True)
    try:
        hash_map = self._diskfile_mgr.get_hashes(device, partition,
                                                 suffix, policy_idx)
    except DiskFileDeviceUnavailable:
        reply = HTTPInsufficientStorage(drive=device, request=request)
    else:
        # Ship the suffix hashes back pickled, as the replicator
        # expects.
        reply = Response(body=pickle.dumps(hash_map))
    return reply
def test_get_name_and_placement_object_req(self):
    """Object paths resolve to five components plus the policy."""
    path = '/device/part/account/container/object'
    req = Request.blank(path, headers={
        'X-Backend-Storage-Policy-Index': '0'})
    for index, expected_type in ((0, EC_POLICY), (1, REPL_POLICY)):
        req.headers['X-Backend-Storage-Policy-Index'] = index
        device, part, account, container, obj, policy = \
            get_name_and_placement(req, 5, 5, True)
        self.assertEqual(
            (device, part, account, container, obj),
            ('device', 'part', 'account', 'container', 'object'))
        self.assertEqual(policy, POLICIES[index])
        self.assertEqual(policy.policy_type, expected_type)
    # a bogus policy index must surface as a 503
    req.headers['X-Backend-Storage-Policy-Index'] = 'foo'
    try:
        device, part, account, container, obj, policy = \
            get_name_and_placement(req, 5, 5, True)
    except HTTPException as e:
        self.assertEqual(e.status_int, 503)
        self.assertEqual(str(e), '503 Service Unavailable')
        self.assertEqual(e.body, "No policy with index foo")
    else:
        self.fail('get_name_and_placement did not raise error '
                  'for invalid storage policy index')
def POST(self, request):
    """Handle HTTP POST requests for the Swift Object Server."""
    # BUG FIX: removed leftover debug print ("----...: In POST") that
    # wrote to stdout on every POST.
    device, partition, account, container, obj, policy_idx = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    # A new expiration time in the past is a client error.
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past',
                              request=request,
                              content_type='text/plain')
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj,
            policy_idx=policy_idx)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        orig_metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined):
        return HTTPNotFound(request=request)
    orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
    # Requests no newer than the on-disk object are conflicts.
    if orig_timestamp >= req_timestamp:
        return HTTPConflict(
            request=request,
            headers={'X-Backend-Timestamp': orig_timestamp.internal})
    metadata = {'X-Timestamp': req_timestamp.internal}
    # Copy through user metadata plus explicitly allowed headers.
    metadata.update(val for val in request.headers.iteritems()
                    if is_user_meta('object', val[0]))
    for header_key in self.allowed_headers:
        if header_key in request.headers:
            header_caps = header_key.title()
            metadata[header_caps] = request.headers[header_key]
    # Keep expirer bookkeeping in sync with the new X-Delete-At.
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    if orig_delete_at != new_delete_at:
        if new_delete_at:
            self.delete_at_update('PUT', new_delete_at, account,
                                  container, obj, request, device,
                                  policy_idx)
        if orig_delete_at:
            self.delete_at_update('DELETE', orig_delete_at, account,
                                  container, obj, request, device,
                                  policy_idx)
    try:
        disk_file.write_metadata(metadata)
    except (DiskFileXattrNotSupported, DiskFileNoSpace):
        return HTTPInsufficientStorage(drive=device, request=request)
    return HTTPAccepted(request=request)
def get_data_dir(vertigo):
    """
    Gets the data directory full path

    :param vertigo: swift_vertigo.vertigo_handler.VertigoObjectHandler instance
    :returns: the data directory path
    """
    dev_root = vertigo.conf.get('devices')
    device, partition, account, container, obj, policy = \
        get_name_and_placement(vertigo.request, 5, 5, True)
    obj_hash = hash_path(account, container, obj)
    # <policy data dir>/<partition>/<suffix>/<hash> relative to the device
    rel_dir = storage_directory(df_data_dir(policy), partition, obj_hash)
    return os.path.join(dev_root, device, rel_dir)
def PUT(self, request):
    """Object PUT with a multi-part upload workaround: a zero-byte
    initialization PUT from an S3 client into a "+segments" container is
    stored as a directory marker before delegating to Swift's PUT."""
    try:
        # hack for supporting multi-part. create dir during initialization
        content_length = int(request.headers.get('Content-Length', -1))
        is_aws_client = 'AWS' in request.headers.get('Authorization', '')
        if content_length == 0 and is_aws_client:
            device, partition, account, container, obj, policy = \
                get_name_and_placement(request, 5, 5, True)
            if container.endswith("+segments"):
                request.headers["Content-Type"] = 'application/directory'
        # now call swift's PUT method
        return server.ObjectController.PUT(self, request)
    except (AlreadyExistsAsFile, AlreadyExistsAsDir):
        device = \
            split_and_validate_path(request, 1, 5, True)
        return HTTPConflict(drive=device, request=request)
def PUT(self, request):
    """Object PUT: enforce swiftonfile's object-creation constraints,
    then hand off to Swift's stock PUT implementation."""
    try:
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        # reject names the filesystem backend cannot represent before
        # any data gets written
        constraint_err = check_object_creation(request, obj)
        if constraint_err:
            return constraint_err
        # delegate the actual write to Swift
        return server.ObjectController.PUT(self, request)
    except (AlreadyExistsAsFile, AlreadyExistsAsDir):
        device = \
            split_and_validate_path(request, 1, 5, True)
        return HTTPConflict(drive=device, request=request)
def GET(self, request):
    """Handle HTTP GET requests for the Swift Object Server.

    Streams the object body with its stored metadata as headers;
    missing/quarantined objects yield a conditional 404.
    """
    device, partition, account, container, obj, policy_idx = \
        get_name_and_placement(request, 5, 5, True)
    # authenticated requests bypass the OS buffer cache unless the
    # server is configured to keep private reads cached
    keep_cache = self.keep_cache_private or (
        "X-Auth-Token" not in request.headers
        and "X-Storage-Token" not in request.headers
    )
    try:
        disk_file = self.get_diskfile(device, partition, account, container,
                                      obj, policy_idx=policy_idx)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        with disk_file.open():
            metadata = disk_file.get_metadata()
            obj_size = int(metadata["Content-Length"])
            file_x_ts = Timestamp(metadata["X-Timestamp"])
            # (keep_cache was previously recomputed here from the same
            # inputs; the value computed above is reused instead)
            response = Response(
                app_iter=disk_file.reader(keep_cache=keep_cache),
                request=request, conditional_response=True
            )
            response.headers["Content-Type"] = metadata.get(
                "Content-Type", "application/octet-stream")
            for key, value in metadata.iteritems():
                if is_sys_or_user_meta("object", key) or \
                        key.lower() in self.allowed_headers:
                    response.headers[key] = value
            response.etag = metadata["ETag"]
            response.last_modified = math.ceil(float(file_x_ts))
            response.content_length = obj_size
            try:
                response.content_encoding = metadata["Content-Encoding"]
            except KeyError:
                pass
            response.headers["X-Timestamp"] = file_x_ts.normal
            response.headers["X-Backend-Timestamp"] = file_x_ts.internal
            resp = request.get_response(response)
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined) as e:
        headers = {}
        if hasattr(e, "timestamp"):
            headers["X-Backend-Timestamp"] = e.timestamp.internal
        resp = HTTPNotFound(request=request, headers=headers,
                            conditional_response=True)
    return resp
def POST(self, request):
    """Handle HTTP POST requests for the Swift Object Server."""
    device, partition, account, container, obj, policy_idx = \
        get_name_and_placement(request, 5, 5, True)
    req_ts = request.headers.get('x-timestamp')
    if req_ts is None or not check_float(req_ts):
        return HTTPBadRequest(body='Missing timestamp', request=request,
                              content_type='text/plain')
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past', request=request,
                              content_type='text/plain')
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj,
            policy_idx=policy_idx)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        orig_metadata = disk_file.read_metadata()
    except (DiskFileNotExist, DiskFileQuarantined):
        return HTTPNotFound(request=request)
    # timestamps are compared as strings in this (legacy) variant
    if orig_metadata.get('X-Timestamp', '0') >= req_ts:
        return HTTPConflict(request=request)
    metadata = {'X-Timestamp': req_ts}
    metadata.update(val for val in request.headers.iteritems()
                    if is_user_meta('object', val[0]))
    for hdr in self.allowed_headers:
        if hdr in request.headers:
            metadata[hdr.title()] = request.headers[hdr]
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    if new_delete_at != orig_delete_at:
        # register the new expiry, retract the old one
        if new_delete_at:
            self.delete_at_update('PUT', new_delete_at, account,
                                  container, obj, request, device)
        if orig_delete_at:
            self.delete_at_update('DELETE', orig_delete_at, account,
                                  container, obj, request, device)
    disk_file.write_metadata(metadata)
    return HTTPAccepted(request=request)
def REPLICATE(self, request):
    """
    Handle REPLICATE requests for the Swift Object Server.  This is
    used by the object replicator to get hashes for directories.

    Note that the name REPLICATE is preserved for historical reasons as
    this verb really just returns the hashes information for the
    specified parameters and is used, for example, by both replication
    and EC.
    """
    device, partition, suffix_parts, policy = \
        get_name_and_placement(request, 2, 3, True)
    # the (optional) third path segment is a '-'-joined suffix list
    if suffix_parts:
        suffixes = suffix_parts.split('-')
    else:
        suffixes = []
    try:
        hashes = self._diskfile_router[policy].get_hashes(
            device, partition, suffixes, policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    return Response(body=pickle.dumps(hashes))
def initialize_request(self):
    """
    Basic validation of request and mount check.

    This function will be called before attempting to acquire a
    replication semaphore lock, so contains only quick checks.
    """
    # Ask eventlet to flush each write immediately; the environ
    # override has been supported since eventlet 0.14.
    env = self.request.environ
    env["eventlet.minimum_write_chunk_size"] = 0
    self.device, self.partition, self.policy = \
        request_helpers.get_name_and_placement(self.request, 2, 2, False)
    frag_hdr = self.request.headers.get("X-Backend-Ssync-Frag-Index")
    self.frag_index = None if frag_hdr is None else int(frag_hdr)
    utils.validate_device_partition(self.device, self.partition)
    self.diskfile_mgr = self.app._diskfile_router[self.policy]
    if not self.diskfile_mgr.get_dev_path(self.device):
        raise swob.HTTPInsufficientStorage(drive=self.device)
    self.fp = env["wsgi.input"]
def initialize_request(self):
    """
    Basic validation of request and mount check.

    This function will be called before attempting to acquire a
    replication semaphore lock, so contains only quick checks.

    Note: this is a generator; callers iterate it to drive the
    _ensure_flush() protocol.
    """
    # The eventlet override discussed in _ensure_flush: flush writes
    # immediately rather than buffering.
    env = self.request.environ
    env['eventlet.minimum_write_chunk_size'] = 0
    self.device, self.partition, self.policy = \
        request_helpers.get_name_and_placement(self.request, 2, 2, False)
    frag_hdr = self.request.headers.get('X-Backend-Ssync-Frag-Index')
    self.frag_index = None if frag_hdr is None else int(frag_hdr)
    utils.validate_device_partition(self.device, self.partition)
    self.diskfile_mgr = self.app._diskfile_router[self.policy]
    if not self.diskfile_mgr.get_dev_path(self.device):
        raise swob.HTTPInsufficientStorage(drive=self.device)
    self.fp = env['wsgi.input']
    for chunk in self._ensure_flush():
        yield chunk
def HEAD(self, request):
    """Handle HTTP HEAD requests for the Swift Object Server."""
    device, partition, account, container, obj, policy_idx = \
        get_name_and_placement(request, 5, 5, True)
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj,
            policy_idx=policy_idx)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        metadata = disk_file.read_metadata()
    except (DiskFileNotExist, DiskFileQuarantined) as e:
        headers = {}
        if hasattr(e, 'timestamp'):
            headers['X-Timestamp'] = e.timestamp
        return HTTPNotFound(request=request, headers=headers,
                            conditional_response=True)
    resp = Response(request=request, conditional_response=True)
    resp.headers['Content-Type'] = metadata.get(
        'Content-Type', 'application/octet-stream')
    for key, value in metadata.iteritems():
        exposed = (is_user_meta('object', key) or
                   key.lower() in self.allowed_headers)
        if exposed:
            resp.headers[key] = value
    resp.etag = metadata['ETag']
    ts = metadata['X-Timestamp']
    resp.last_modified = math.ceil(float(ts))
    # the container sync feature relies on this header
    resp.headers['X-Timestamp'] = ts
    resp.content_length = int(metadata['Content-Length'])
    if 'Content-Encoding' in metadata:
        resp.content_encoding = metadata['Content-Encoding']
    return resp
def initialize_request(self):
    """
    Basic validation of request and mount check.

    This function will be called before attempting to acquire a
    replication semaphore lock, so contains only quick checks.

    Note: this is a generator; callers iterate it to drive the
    _ensure_flush() protocol.
    """
    # The eventlet override discussed in _ensure_flush: flush writes
    # immediately rather than buffering.
    env = self.request.environ
    env['eventlet.minimum_write_chunk_size'] = 0
    self.device, self.partition, self.policy = \
        request_helpers.get_name_and_placement(self.request, 2, 2, False)
    frag_hdr = self.request.headers.get('X-Backend-Ssync-Frag-Index')
    self.frag_index = None if frag_hdr is None else int(frag_hdr)
    utils.validate_device_partition(self.device, self.partition)
    self.diskfile_mgr = self.app._diskfile_router[self.policy]
    # this variant verifies the device mount point directly
    if self.diskfile_mgr.mount_check and not constraints.check_mount(
            self.diskfile_mgr.devices, self.device):
        raise swob.HTTPInsufficientStorage(drive=self.device)
    self.fp = env['wsgi.input']
    for chunk in self._ensure_flush():
        yield chunk
def DELETE(self, request):
    """Handle HTTP DELETE requests for the Swift on File object server

    Reads the existing on-disk state (distinguishing expired, deleted,
    missing and quarantined files), honors the X-If-Delete-At
    precondition, updates the expirer queue, writes a tombstone when the
    request timestamp is newer, and tells the container server.
    """
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    try:
        disk_file = self.get_diskfile(device, partition,
                                      account, container, obj,
                                      policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    # Each branch below settles (orig_timestamp, orig_metadata,
    # response_class); the order of checks after this block depends on
    # those three values.
    try:
        with disk_file.open():
            orig_metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except DiskFileExpired as e:
        # expired object: keep its timestamp/metadata so the expirer
        # bookkeeping below still sees the old X-Delete-At
        orig_timestamp = e.timestamp
        orig_metadata = e.metadata
        response_class = HTTPNotFound
    except DiskFileDeleted as e:
        # already tombstoned; remember when
        orig_timestamp = e.timestamp
        orig_metadata = {}
        response_class = HTTPNotFound
    # If the file got deleted outside of Swift, we won't see it.
    # So we say "file, what file?" and delete it from the container.
    except DiskFileNotExist:
        orig_timestamp = 0
        orig_metadata = {}
        response_class = HTTPNotFound
    except DiskFileQuarantined:
        orig_timestamp = 0
        orig_metadata = {}
        response_class = HTTPNotFound
    else:
        orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
        if orig_timestamp < req_timestamp:
            response_class = HTTPNoContent
        else:
            # on-disk data is newer or equal: report a conflict
            response_class = HTTPConflict
    response_timestamp = max(orig_timestamp, req_timestamp)
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    try:
        req_if_delete_at = int(request.headers['X-If-Delete-At'])
    except KeyError:
        # header absent: no precondition to enforce
        pass
    except ValueError:
        return HTTPBadRequest(request=request,
                              body='Bad X-If-Delete-At header value')
    else:
        # request includes X-If-Delete-At; we must not place a tombstone
        # unless we can verify the X-If-Delete-At time
        if not orig_timestamp:
            # no object found at all
            return HTTPNotFound()
        if orig_delete_at != req_if_delete_at:
            return HTTPPreconditionFailed(
                request=request,
                body='X-If-Delete-At and X-Delete-At do not match')
        else:
            # differentiate success from no object at all
            response_class = HTTPNoContent
    if orig_delete_at:
        # retract the pending expiration for the old data
        self.delete_at_update('DELETE', orig_delete_at, account,
                              container, obj, request, device, policy)
    if orig_timestamp < req_timestamp:
        # request is newer: tombstone locally, then notify the container
        disk_file.delete(req_timestamp)
        self.container_update('DELETE', account, container, obj, request,
                              HeaderKeyDict(
                                  {'x-timestamp': req_timestamp.internal}
                              ), device, policy)
    return response_class(
        request=request,
        headers={'X-Backend-Timestamp': response_timestamp.internal}
    )
def PUT(self, request):
    """Handle HTTP PUT requests for the Swift Object Server.

    This variant streams the object body to a cloud backend via
    put_file() (addressed by the X-Storage-Container header) instead of
    a local diskfile writer; the writer.put()/writer.commit() calls are
    commented out below.  It still speaks the proxy's multipart-MIME
    footer and two-phase-commit protocol.
    """
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    error_response = check_object_creation(request, obj)
    if error_response:
        return error_response
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past', request=request,
                              content_type='text/plain')
    try:
        fsize = request.message_length()
    except ValueError as e:
        return HTTPBadRequest(body=str(e), request=request,
                              content_type='text/plain')
    # In case of multipart-MIME put, the proxy sends a chunked request,
    # but may let us know the real content length so we can verify that
    # we have enough disk space to hold the object.
    if fsize is None:
        fsize = request.headers.get('X-Backend-Obj-Content-Length')
        if fsize is not None:
            try:
                fsize = int(fsize)
            except ValueError as e:
                return HTTPBadRequest(body=str(e), request=request,
                                      content_type='text/plain')
    orig_metadata = {}
    orig_timestamp = Timestamp(0)
    orig_delete_at = 0
    upload_expiration = time.time() + self.max_upload_time
    etag = md5()
    elapsed_time = 0
    # Target cloud location: "<container>/<path>" from the request.
    # NOTE(review): no guard for a missing header or missing '/' —
    # cloud_container[1] below would raise; confirm callers always set it.
    cloud_container_info = request.headers.get('X-Storage-Container')
    cloud_container = cloud_container_info.strip().split('/')
    try:
        upload_size = 0
        # If the proxy wants to send us object metadata after the
        # object body, it sets some headers. We have to tell the
        # proxy, in the 100 Continue response, that we're able to
        # parse a multipart MIME document and extract the object and
        # metadata from it. If we don't, then the proxy won't
        # actually send the footer metadata.
        have_metadata_footer = False
        use_multiphase_commit = False
        mime_documents_iter = iter([])
        obj_input = request.environ['wsgi.input']
        hundred_continue_headers = []
        if config_true_value(
                request.headers.get(
                    'X-Backend-Obj-Multiphase-Commit')):
            use_multiphase_commit = True
            hundred_continue_headers.append(
                ('X-Obj-Multiphase-Commit', 'yes'))
        if config_true_value(
                request.headers.get('X-Backend-Obj-Metadata-Footer')):
            have_metadata_footer = True
            hundred_continue_headers.append(
                ('X-Obj-Metadata-Footer', 'yes'))
        if have_metadata_footer or use_multiphase_commit:
            obj_input.set_hundred_continue_response_headers(
                hundred_continue_headers)
            mime_boundary = request.headers.get(
                'X-Backend-Obj-Multipart-Mime-Boundary')
            if not mime_boundary:
                return HTTPBadRequest("no MIME boundary")
            try:
                with ChunkReadTimeout(self.client_timeout):
                    # the first MIME document is the object body itself
                    mime_documents_iter = iter_mime_headers_and_bodies(
                        request.environ['wsgi.input'],
                        mime_boundary, self.network_chunk_size)
                    _junk_hdrs, obj_input = next(mime_documents_iter)
            except ChunkReadTimeout:
                return HTTPRequestTimeout(request=request)
        timeout_reader = self._make_timeout_reader(obj_input)
        try:
            start_time = time.time()
            if start_time > upload_expiration:
                self.logger.increment('PUT.timeouts')
                return HTTPRequestTimeout(request=request)
            # hand the whole stream to the cloud backend; size and hash
            # come back on the result object
            if have_metadata_footer:
                put_res = put_file(cloud_container[0], cloud_container[1],
                                   obj, iter(timeout_reader, ''))
            else:
                put_res = put_file(cloud_container[0], cloud_container[1],
                                   obj, iter(obj_input))
            upload_size += put_res.size
            etag = put_res.hash
            elapsed_time += time.time() - start_time
        except ChunkReadTimeout:
            return HTTPRequestTimeout(request=request)
        if upload_size:
            self.logger.transfer_rate(
                'PUT.' + device + '.timing', elapsed_time,
                upload_size)
        if fsize is not None and fsize != upload_size:
            # client sent fewer/more bytes than it promised
            return HTTPClientDisconnect(request=request)
        footer_meta = {}
        if have_metadata_footer:
            footer_meta = self._read_metadata_footer(
                mime_documents_iter)
        # footer etag (if any) wins over the request header etag
        request_etag = (footer_meta.get('etag') or
                        request.headers.get('etag', '')).lower()
        if request_etag and request_etag != etag:
            return HTTPUnprocessableEntity(request=request)
        metadata = {
            'X-Timestamp': request.timestamp.internal,
            'Content-Type': request.headers['content-type'],
            'ETag': etag,
            'Content-Length': str(upload_size),
        }
        metadata.update(val for val in request.headers.iteritems()
                        if is_sys_or_user_meta('object', val[0]))
        metadata.update(val for val in footer_meta.iteritems()
                        if is_sys_or_user_meta('object', val[0]))
        headers_to_copy = (
            request.headers.get(
                'X-Backend-Replication-Headers', '').split() +
            list(self.allowed_headers))
        for header_key in headers_to_copy:
            if header_key in request.headers:
                header_caps = header_key.title()
                metadata[header_caps] = request.headers[header_key]
        #writer.put(metadata)
        # if the PUT requires a two-phase commit (a data and a commit
        # phase) send the proxy server another 100-continue response
        # to indicate that we are finished writing object data
        if use_multiphase_commit:
            request.environ['wsgi.input'].\
                send_hundred_continue_response()
            if not self._read_put_commit_message(mime_documents_iter):
                return HTTPServerError(request=request)
            # got 2nd phase confirmation, write a timestamp.durable
            # state file to indicate a successful PUT
            #writer.commit(request.timestamp)
            # Drain any remaining MIME docs from the socket. There
            # shouldn't be any, but we must read the whole request body.
            try:
                while True:
                    with ChunkReadTimeout(self.client_timeout):
                        _junk_hdrs, _junk_body = next(
                            mime_documents_iter)
                    drain(_junk_body, self.network_chunk_size,
                          self.client_timeout)
            except ChunkReadTimeout:
                raise HTTPClientDisconnect()
            except StopIteration:
                pass
    except (DiskFileXattrNotSupported, DiskFileNoSpace):
        return HTTPInsufficientStorage(drive=device, request=request)
    if request_etag and request_etag != etag:
        return HTTPUnprocessableEntity(request=request)
    update_headers = HeaderKeyDict({
        'x-size': metadata['Content-Length'],
        'x-content-type': metadata['Content-Type'],
        'x-timestamp': metadata['X-Timestamp'],
        'x-etag': metadata['ETag']})
    # apply any container update header overrides sent with request
    self._check_container_override(update_headers, request.headers)
    self._check_container_override(update_headers, footer_meta)
    self.container_update(
        'PUT', account, container, obj, request,
        update_headers,
        device, policy)
    return HTTPCreated(request=request, etag=etag)
def DELETE(self, request):
    """Handle HTTP DELETE requests for the Swift Object Server."""
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj, policy=policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    # Settle (orig_timestamp, orig_metadata, response_class) from the
    # current on-disk state; all later checks key off these three.
    try:
        orig_metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except DiskFileExpired as e:
        orig_timestamp, orig_metadata = e.timestamp, e.metadata
        response_class = HTTPNotFound
    except DiskFileDeleted as e:
        orig_timestamp, orig_metadata = e.timestamp, {}
        response_class = HTTPNotFound
    except (DiskFileNotExist, DiskFileQuarantined):
        orig_timestamp, orig_metadata = 0, {}
        response_class = HTTPNotFound
    else:
        orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
        response_class = (HTTPNoContent
                          if orig_timestamp < req_timestamp
                          else HTTPConflict)
    response_timestamp = max(orig_timestamp, req_timestamp)
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    if 'x-if-delete-at' in request.headers:
        # the client only wants to delete an object that expires at the
        # given time; refuse to tombstone unless we can verify that
        try:
            req_if_delete_at = int(request.headers['x-if-delete-at'])
        except ValueError:
            return HTTPBadRequest(
                request=request, body='Bad X-If-Delete-At header value')
        if not orig_timestamp:
            # nothing on disk at all
            return HTTPNotFound()
        if orig_delete_at != req_if_delete_at:
            return HTTPPreconditionFailed(
                request=request,
                body='X-If-Delete-At and X-Delete-At do not match')
        # success is distinguishable from "no object at all"
        response_class = HTTPNoContent
    if orig_delete_at:
        self.delete_at_update('DELETE', orig_delete_at, account,
                              container, obj, request, device, policy)
    if orig_timestamp < req_timestamp:
        # newer request wins: tombstone, then tell the container
        disk_file.delete(req_timestamp)
        self.container_update(
            'DELETE', account, container, obj, request,
            HeaderKeyDict({'x-timestamp': req_timestamp.internal}),
            device, policy)
    return response_class(
        request=request,
        headers={'X-Backend-Timestamp': response_timestamp.internal})
def GET(self, request):
    """Handle HTTP GET requests for the Swift Object Server.

    Before streaming the object, this variant asks an external ReBAC
    service whether the requesting user may read the object; an
    unauthorized read raises AttributeError (which propagates — it is
    not converted to a 401/403 here).
    """
    # NOTE(review): leftover debug print
    print("-------------------- IN OBJ SERVER GET -------------------")
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    keep_cache = self.keep_cache_private or (
        'X-Auth-Token' not in request.headers and
        'X-Storage-Token' not in request.headers)
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj,
            policy=policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        with disk_file.open():
            metadata = disk_file.get_metadata()
            # KeyError if the client sent no token; NOTE(review):
            # confirm auth middleware guarantees this header.
            auth_token = request.headers['X-Auth-Token']
            #rebac_metadata = metadata['X-Object-Meta-Rebac']
            #acl_metadata = metadata['X-Object-Meta-Oacl']
            # Build "cloud1:<acct>:<cont>:<obj>" from metadata['name']:
            # strip a leading quote and anything after a quote, drop the
            # leading '/', replace '/' with ':', prefix "cloud1:".
            line1 = re.sub(r"^'", "", metadata['name'])
            line2 = re.sub(r"'.*", "", line1)
            line3 = re.sub(r"^/", "", line2)
            line4 = re.sub(r"/", ":", line3)
            authorize_file_info = re.sub(r"^", "cloud1:", line4)
            authorize_user_info = "cloud1:" + \
                request.headers['X-Project-Id'] + ":" + \
                request.headers['X-User-Id']
            print("--- The user info is -> ", authorize_user_info)
            print("--- The File info -> ", authorize_file_info)
            auth_resp = rebac.authorize(auth_token, authorize_user_info,
                                        authorize_file_info)
            # NOTE(review): "== False" lets falsy-but-not-False returns
            # (None, 0) through as authorized — confirm authorize()'s
            # return contract.
            if auth_resp == False:
                raise AttributeError('User Not Allowed to access File')
            else:
                # NOTE(review): the bracketed names here are literal
                # text, not interpolated values.
                print("**** User [authorize_user_info] is allowed to access File [authorize_file_info] ****")
            obj_size = int(metadata['Content-Length'])
            file_x_ts = Timestamp(metadata['X-Timestamp'])
            # recomputed with the same inputs as above
            keep_cache = (self.keep_cache_private or
                          ('X-Auth-Token' not in request.headers and
                           'X-Storage-Token' not in request.headers))
            conditional_etag = None
            if 'X-Backend-Etag-Is-At' in request.headers:
                conditional_etag = metadata.get(
                    request.headers['X-Backend-Etag-Is-At'])
            response = Response(
                app_iter=disk_file.reader(keep_cache=keep_cache),
                request=request, conditional_response=True,
                conditional_etag=conditional_etag)
            response.headers['Content-Type'] = metadata.get(
                'Content-Type', 'application/octet-stream')
            for key, value in metadata.iteritems():
                if is_sys_or_user_meta('object', key) or \
                        key.lower() in self.allowed_headers:
                    response.headers[key] = value
            response.etag = metadata['ETag']
            response.last_modified = math.ceil(float(file_x_ts))
            response.content_length = obj_size
            try:
                response.content_encoding = metadata[
                    'Content-Encoding']
            except KeyError:
                pass
            response.headers['X-Timestamp'] = file_x_ts.normal
            response.headers['X-Backend-Timestamp'] = file_x_ts.internal
            resp = request.get_response(response)
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined) as e:
        headers = {}
        if hasattr(e, 'timestamp'):
            headers['X-Backend-Timestamp'] = e.timestamp.internal
        resp = HTTPNotFound(request=request, headers=headers,
                            conditional_response=True)
    return resp
def GET(self, request):
    """Handle HTTP GET requests for the Swift Object Server.

    (Reformatted duplicate of the ReBAC-gated GET above.)  Asks an
    external ReBAC service whether the requesting user may read the
    object before streaming it; an unauthorized read raises
    AttributeError, which propagates rather than becoming a 401/403.
    """
    # NOTE(review): leftover debug print
    print("-------------------- IN OBJ SERVER GET -------------------")
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    keep_cache = self.keep_cache_private or (
        'X-Auth-Token' not in request.headers and
        'X-Storage-Token' not in request.headers)
    try:
        disk_file = self.get_diskfile(device, partition, account,
                                      container, obj, policy=policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        with disk_file.open():
            metadata = disk_file.get_metadata()
            # KeyError if the client sent no token; NOTE(review):
            # confirm auth middleware guarantees this header.
            auth_token = request.headers['X-Auth-Token']
            #rebac_metadata = metadata['X-Object-Meta-Rebac']
            #acl_metadata = metadata['X-Object-Meta-Oacl']
            # Build "cloud1:<acct>:<cont>:<obj>" from metadata['name']:
            # strip a leading quote and anything after a quote, drop
            # the leading '/', replace '/' with ':', prefix "cloud1:".
            line1 = re.sub(r"^'", "", metadata['name'])
            line2 = re.sub(r"'.*", "", line1)
            line3 = re.sub(r"^/", "", line2)
            line4 = re.sub(r"/", ":", line3)
            authorize_file_info = re.sub(r"^", "cloud1:", line4)
            authorize_user_info = "cloud1:" + request.headers[
                'X-Project-Id'] + ":" + request.headers['X-User-Id']
            print("--- The user info is -> ", authorize_user_info)
            print("--- The File info -> ", authorize_file_info)
            auth_resp = rebac.authorize(auth_token, authorize_user_info,
                                        authorize_file_info)
            # NOTE(review): "== False" lets falsy-but-not-False returns
            # (None, 0) through as authorized — confirm authorize()'s
            # return contract.
            if auth_resp == False:
                raise AttributeError('User Not Allowed to access File')
            else:
                # NOTE(review): the bracketed names here are literal
                # text, not interpolated values.
                print(
                    "**** User [authorize_user_info] is allowed to access File [authorize_file_info] ****"
                )
            obj_size = int(metadata['Content-Length'])
            file_x_ts = Timestamp(metadata['X-Timestamp'])
            # recomputed with the same inputs as above
            keep_cache = (self.keep_cache_private or
                          ('X-Auth-Token' not in request.headers and
                           'X-Storage-Token' not in request.headers))
            conditional_etag = None
            if 'X-Backend-Etag-Is-At' in request.headers:
                conditional_etag = metadata.get(
                    request.headers['X-Backend-Etag-Is-At'])
            response = Response(
                app_iter=disk_file.reader(keep_cache=keep_cache),
                request=request, conditional_response=True,
                conditional_etag=conditional_etag)
            response.headers['Content-Type'] = metadata.get(
                'Content-Type', 'application/octet-stream')
            for key, value in metadata.iteritems():
                if is_sys_or_user_meta('object', key) or \
                        key.lower() in self.allowed_headers:
                    response.headers[key] = value
            response.etag = metadata['ETag']
            response.last_modified = math.ceil(float(file_x_ts))
            response.content_length = obj_size
            try:
                response.content_encoding = metadata['Content-Encoding']
            except KeyError:
                pass
            response.headers['X-Timestamp'] = file_x_ts.normal
            response.headers['X-Backend-Timestamp'] = file_x_ts.internal
            resp = request.get_response(response)
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined) as e:
        headers = {}
        if hasattr(e, 'timestamp'):
            headers['X-Backend-Timestamp'] = e.timestamp.internal
        resp = HTTPNotFound(request=request, headers=headers,
                            conditional_response=True)
    return resp
def DELETE(self, request):
    """Handle HTTP DELETE requests for the Swift Object Server.

    Reads the existing on-disk state (distinguishing expired, deleted,
    missing and quarantined files), honors the X-If-Delete-At
    precondition, updates the expirer queue, writes a tombstone when
    the request timestamp is newer, and tells the container server.
    """
    device, partition, account, container, obj, policy_idx = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    try:
        disk_file = self.get_diskfile(device, partition, account,
                                      container, obj,
                                      policy_idx=policy_idx)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    # Each branch below settles (orig_timestamp, orig_metadata,
    # response_class); everything after this block keys off those three.
    try:
        orig_metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except DiskFileExpired as e:
        # expired object: keep timestamp/metadata so the expirer
        # bookkeeping below still sees the old X-Delete-At
        orig_timestamp = e.timestamp
        orig_metadata = e.metadata
        response_class = HTTPNotFound
    except DiskFileDeleted as e:
        # already tombstoned; remember when
        orig_timestamp = e.timestamp
        orig_metadata = {}
        response_class = HTTPNotFound
    except (DiskFileNotExist, DiskFileQuarantined):
        orig_timestamp = 0
        orig_metadata = {}
        response_class = HTTPNotFound
    else:
        orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
        if orig_timestamp < req_timestamp:
            response_class = HTTPNoContent
        else:
            # on-disk data is newer or equal: report a conflict
            response_class = HTTPConflict
    response_timestamp = max(orig_timestamp, req_timestamp)
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    try:
        req_if_delete_at_val = request.headers['x-if-delete-at']
        req_if_delete_at = int(req_if_delete_at_val)
    except KeyError:
        # header absent: no precondition to enforce
        pass
    except ValueError:
        return HTTPBadRequest(request=request,
                              body='Bad X-If-Delete-At header value')
    else:
        # request includes x-if-delete-at; we must not place a tombstone
        # if we can not verify the x-if-delete-at time
        if not orig_timestamp:
            # no object found at all
            return HTTPNotFound()
        if orig_delete_at != req_if_delete_at:
            return HTTPPreconditionFailed(
                request=request,
                body='X-If-Delete-At and X-Delete-At do not match')
        else:
            # differentiate success from no object at all
            response_class = HTTPNoContent
    if orig_delete_at:
        # retract the pending expiration for the old data
        self.delete_at_update('DELETE', orig_delete_at, account,
                              container, obj, request, device,
                              policy_idx)
    if orig_timestamp < req_timestamp:
        # request is newer: tombstone locally, then notify the container
        disk_file.delete(req_timestamp)
        self.container_update(
            'DELETE', account, container, obj, request,
            HeaderKeyDict({'x-timestamp': req_timestamp.internal}),
            device, policy_idx)
    return response_class(
        request=request,
        headers={'X-Backend-Timestamp': response_timestamp.internal})
def PUT(self, request):
    """Handle HTTP PUT requests for the Swift Object Server.

    Validates the request, streams the body into a diskfile writer
    (hashing as it goes), verifies length and ETag, persists metadata,
    updates the expirer queue on X-Delete-At changes, and notifies the
    container server.  Returns 201 on success.
    """
    device, partition, account, container, obj, policy_idx = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    error_response = check_object_creation(request, obj)
    if error_response:
        return error_response
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past', request=request,
                              content_type='text/plain')
    try:
        fsize = request.message_length()
    except ValueError as e:
        return HTTPBadRequest(body=str(e), request=request,
                              content_type='text/plain')
    try:
        disk_file = self.get_diskfile(device, partition, account,
                                      container, obj,
                                      policy_idx=policy_idx)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        orig_metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined):
        # no prior data; treat as a fresh object
        orig_metadata = {}
    # Checks for If-None-Match
    if request.if_none_match is not None and orig_metadata:
        if '*' in request.if_none_match:
            # File exists already so return 412
            return HTTPPreconditionFailed(request=request)
        if orig_metadata.get('ETag') in request.if_none_match:
            # The current ETag matches, so return 412
            return HTTPPreconditionFailed(request=request)
    orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
    if orig_timestamp >= req_timestamp:
        # on-disk data is at least as new; report what we have
        return HTTPConflict(
            request=request,
            headers={'X-Backend-Timestamp': orig_timestamp.internal})
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    upload_expiration = time.time() + self.max_upload_time
    etag = md5()
    elapsed_time = 0
    try:
        with disk_file.create(size=fsize) as writer:
            upload_size = 0

            # reads one network chunk, raising ChunkReadTimeout if the
            # client stalls; iter(..., '') stops at EOF
            def timeout_reader():
                with ChunkReadTimeout(self.client_timeout):
                    return request.environ['wsgi.input'].read(
                        self.network_chunk_size)

            try:
                for chunk in iter(lambda: timeout_reader(), ''):
                    start_time = time.time()
                    if start_time > upload_expiration:
                        self.logger.increment('PUT.timeouts')
                        return HTTPRequestTimeout(request=request)
                    etag.update(chunk)
                    upload_size = writer.write(chunk)
                    elapsed_time += time.time() - start_time
            except ChunkReadTimeout:
                return HTTPRequestTimeout(request=request)
            if upload_size:
                self.logger.transfer_rate('PUT.' + device + '.timing',
                                          elapsed_time, upload_size)
            if fsize is not None and fsize != upload_size:
                # client sent fewer/more bytes than it promised
                return HTTPClientDisconnect(request=request)
            etag = etag.hexdigest()
            if 'etag' in request.headers and \
                    request.headers['etag'].lower() != etag:
                return HTTPUnprocessableEntity(request=request)
            metadata = {
                'X-Timestamp': request.timestamp.internal,
                'Content-Type': request.headers['content-type'],
                'ETag': etag,
                'Content-Length': str(upload_size),
            }
            metadata.update(val for val in request.headers.iteritems()
                            if is_sys_or_user_meta('object', val[0]))
            headers_to_copy = (request.headers.get(
                'X-Backend-Replication-Headers', '').split() +
                list(self.allowed_headers))
            for header_key in headers_to_copy:
                if header_key in request.headers:
                    header_caps = header_key.title()
                    metadata[header_caps] = request.headers[header_key]
            writer.put(metadata)
    except (DiskFileXattrNotSupported, DiskFileNoSpace):
        return HTTPInsufficientStorage(drive=device, request=request)
    if orig_delete_at != new_delete_at:
        # register the new expiry, then retract the old one
        if new_delete_at:
            self.delete_at_update('PUT', new_delete_at, account,
                                  container, obj, request, device,
                                  policy_idx)
        if orig_delete_at:
            self.delete_at_update('DELETE', orig_delete_at, account,
                                  container, obj, request, device,
                                  policy_idx)
    self.container_update(
        'PUT', account, container, obj, request,
        HeaderKeyDict({
            'x-size': metadata['Content-Length'],
            'x-content-type': metadata['Content-Type'],
            'x-timestamp': metadata['X-Timestamp'],
            'x-etag': metadata['ETag']
        }),
        device, policy_idx)
    return HTTPCreated(request=request, etag=etag)
def PUT(self, request):
    """Handle HTTP PUT requests for the Swift Object Server.

    Same flow as the single-quoted variant above: parse placement,
    validate timestamps and preconditions, stream the body into a
    DiskFile while hashing it, then update expirer and container
    listings.

    :param request: swob Request object
    :returns: swob response (201 Created on success)
    """
    device, partition, account, container, obj, policy_idx = get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    error_response = check_object_creation(request, obj)
    if error_response:
        return error_response
    # X-Delete-At must not already be in the past (0 means unset).
    new_delete_at = int(request.headers.get("X-Delete-At") or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body="X-Delete-At in past", request=request, content_type="text/plain")
    try:
        # None when the transfer is chunked.
        fsize = request.message_length()
    except ValueError as e:
        return HTTPBadRequest(body=str(e), request=request, content_type="text/plain")
    try:
        disk_file = self.get_diskfile(device, partition, account, container, obj, policy_idx=policy_idx)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        orig_metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined):
        # No existing object: fresh create.
        orig_metadata = {}
    # Checks for If-None-Match
    if request.if_none_match is not None and orig_metadata:
        if "*" in request.if_none_match:
            # File exists already so return 412
            return HTTPPreconditionFailed(request=request)
        if orig_metadata.get("ETag") in request.if_none_match:
            # The current ETag matches, so return 412
            return HTTPPreconditionFailed(request=request)
    # Only strictly newer timestamps may overwrite.
    orig_timestamp = Timestamp(orig_metadata.get("X-Timestamp", 0))
    if orig_timestamp >= req_timestamp:
        return HTTPConflict(request=request, headers={"X-Backend-Timestamp": orig_timestamp.internal})
    orig_delete_at = int(orig_metadata.get("X-Delete-At") or 0)
    upload_expiration = time.time() + self.max_upload_time
    etag = md5()
    elapsed_time = 0
    try:
        with disk_file.create(size=fsize) as writer:
            upload_size = 0

            def timeout_reader():
                # One bounded read; raises ChunkReadTimeout if the
                # client stalls.
                with ChunkReadTimeout(self.client_timeout):
                    return request.environ["wsgi.input"].read(self.network_chunk_size)

            try:
                # "" sentinel: Python 2 wsgi.input yields str; EOF is "".
                for chunk in iter(lambda: timeout_reader(), ""):
                    start_time = time.time()
                    if start_time > upload_expiration:
                        self.logger.increment("PUT.timeouts")
                        return HTTPRequestTimeout(request=request)
                    etag.update(chunk)
                    upload_size = writer.write(chunk)
                    elapsed_time += time.time() - start_time
            except ChunkReadTimeout:
                return HTTPRequestTimeout(request=request)
            if upload_size:
                self.logger.transfer_rate("PUT." + device + ".timing", elapsed_time, upload_size)
            # Declared length, if any, must match the bytes received.
            if fsize is not None and fsize != upload_size:
                return HTTPClientDisconnect(request=request)
            etag = etag.hexdigest()
            if "etag" in request.headers and request.headers["etag"].lower() != etag:
                return HTTPUnprocessableEntity(request=request)
            metadata = {
                "X-Timestamp": request.timestamp.internal,
                "Content-Type": request.headers["content-type"],
                "ETag": etag,
                "Content-Length": str(upload_size),
            }
            # Persist user/system object metadata from the request.
            metadata.update(val for val in request.headers.iteritems() if is_sys_or_user_meta("object", val[0]))
            # Replication may name extra headers to persist; always
            # include the configured allowed_headers too.
            headers_to_copy = request.headers.get("X-Backend-Replication-Headers", "").split() + list(
                self.allowed_headers
            )
            for header_key in headers_to_copy:
                if header_key in request.headers:
                    header_caps = header_key.title()
                    metadata[header_caps] = request.headers[header_key]
            writer.put(metadata)
    except (DiskFileXattrNotSupported, DiskFileNoSpace):
        return HTTPInsufficientStorage(drive=device, request=request)
    # Keep the expirer queue consistent with the new X-Delete-At.
    if orig_delete_at != new_delete_at:
        if new_delete_at:
            self.delete_at_update("PUT", new_delete_at, account, container, obj, request, device, policy_idx)
        if orig_delete_at:
            self.delete_at_update("DELETE", orig_delete_at, account, container, obj, request, device, policy_idx)
    # Async container listing update for the new object.
    self.container_update(
        "PUT",
        account,
        container,
        obj,
        request,
        HeaderKeyDict(
            {
                "x-size": metadata["Content-Length"],
                "x-content-type": metadata["Content-Type"],
                "x-timestamp": metadata["X-Timestamp"],
                "x-etag": metadata["ETag"],
            }
        ),
        device,
        policy_idx,
    )
    return HTTPCreated(request=request, etag=etag)
def GET(self, request):
    """Handle HTTP GET requests for the Swift on File object server

    Opens the DiskFile for the requested object and builds a streaming
    response from its metadata. HPSS twist: a file that has been
    migrated to offline tape returns 202 Accepted instead of the body,
    and HPSS xattr metadata can be injected into the response headers
    on demand.

    :param request: swob Request object
    :returns: swob response (200/206 via conditional handling, 202 if
              the file is on offline tape, or an error response)
    """
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    # Only cache pages for unauthenticated requests unless configured
    # to keep private data in the page cache.
    keep_cache = self.keep_cache_private or (
        'X-Auth-Token' not in request.headers and
        'X-Storage-Token' not in request.headers)
    # Get Diskfile
    try:
        disk_file = self.get_diskfile(device, partition, account,
                                      container, obj, policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    # Get metadata and append it to response
    try:
        with disk_file.open():
            metadata = disk_file.get_metadata()
            obj_size = int(metadata['Content-Length'])
            file_x_ts = Timestamp(metadata['X-Timestamp'])
            try:
                # (HPSS) Our file could end up being on an offline
                # tape, so we need to check for it and return an
                # HTTP 'accepted, but still processing' response.
                if disk_file.is_offline():
                    return HTTPAccepted(request=request)
            except (SwiftOnFileSystemIOError, SwiftOnFileFsException):
                return HTTPServiceUnavailable(request=request)
            # conditional_response enables Range / If-* handling in
            # swob when the response is finally rendered.
            response = Response(
                app_iter=disk_file.reader(keep_cache=keep_cache),
                request=request, conditional_response=True)
            response.headers['Content-Type'] = metadata.get(
                'Content-Type', 'application/octet-stream')
            # Mirror stored user/system metadata and allowed headers
            # back to the client.
            for key, value in metadata.iteritems():
                if is_sys_or_user_meta('object', key) or \
                        key.lower() in self.allowed_headers:
                    response.headers[key] = value
            response.etag = metadata['ETag']
            response.last_modified = math.ceil(float(file_x_ts))
            response.content_length = obj_size
            try:
                response.content_encoding = metadata['Content-Encoding']
            except KeyError:
                # Content-Encoding is optional metadata.
                pass
            response.headers['X-Timestamp'] = file_x_ts.normal
            response.headers['X-Backend-Timestamp'] = file_x_ts.internal
            # (HPSS) Inject HPSS xattr metadata into headers
            want_hpss_metadata = request.headers.get(
                'X-HPSS-Get-Metadata', False)
            if config_true_value(want_hpss_metadata):
                try:
                    hpss_headers = disk_file.read_hpss_system_metadata()
                    response.headers.update(hpss_headers)
                except SwiftOnFileSystemIOError:
                    return HTTPServiceUnavailable(request=request)
            # Render inside the open() context so the reader stays
            # valid while conditional logic runs.
            return request.get_response(response)
    except (DiskFileNotExist, DiskFileQuarantined) as e:
        headers = {}
        if hasattr(e, 'timestamp'):
            # Let the proxy know how recent our knowledge of the
            # missing object is.
            headers['X-Backend-Timestamp'] = e.timestamp.internal
        return HTTPNotFound(request=request, headers=headers,
                            conditional_response=True)
def POST(self, request):
    """Handle HTTP POST requests for the Swift on File object server

    Applies new object metadata (user metadata, X-Delete-At, allowed
    headers) to an existing object, after first pushing any
    HPSS-specific purge-lock / class-of-service settings down to the
    underlying file.

    :param request: swob Request object
    :returns: 202 Accepted on success, or an error response
    """
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    request_timestamp = valid_timestamp(request)

    # An expiry time in the past is rejected outright (0 = unset).
    delete_at = int(request.headers.get('X-Delete-At') or 0)
    if delete_at and delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past',
                              request=request,
                              content_type='text/plain')

    # Get DiskFile
    try:
        disk_file = self.get_diskfile(device, partition, account,
                                      container, obj, policy)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)

    # Set Purgelock status if we got it
    purge_lock = request.headers.get('X-HPSS-Purgelock-Status')
    if purge_lock:
        try:
            hpssfs.ioctl(disk_file._fd, hpssfs.HPSSFS_PURGE_LOCK,
                         int(purge_lock))
        except IOError as err:
            raise SwiftOnFileSystemIOError(
                err.errno,
                '%s, xattr.getxattr("%s", ...)' % (err.strerror,
                                                   disk_file._fd))

    # Set class of service if we got it
    cos_id = request.headers.get('X-HPSS-Class-Of-Service-ID')
    if cos_id:
        try:
            # NOTE(review): an int is handed to xattr.setxattr here;
            # xattr normally expects a str/bytes value — confirm this
            # is intentional.
            xattr.setxattr(disk_file._fd, 'system.hpss.cos',
                           int(cos_id))
        except IOError as err:
            raise SwiftOnFileSystemIOError(
                err.errno,
                '%s, xattr.setxattr("%s", ...)' % (err.strerror,
                                                   disk_file._fd))

    # Update metadata from request
    try:
        existing_metadata = disk_file.read_metadata()
    except (DiskFileNotExist, DiskFileQuarantined):
        return HTTPNotFound(request=request)

    # A POST must carry a newer timestamp than the stored object.
    existing_timestamp = Timestamp(
        existing_metadata.get('X-Timestamp', 0))
    if existing_timestamp >= request_timestamp:
        return HTTPConflict(
            request=request,
            headers={'X-Backend-Timestamp': existing_timestamp.internal})

    new_metadata = {'X-Timestamp': request_timestamp.internal}
    # Carry over user object metadata from the request headers.
    new_metadata.update({key: value
                         for key, value in request.headers.iteritems()
                         if is_user_meta('object', key)})
    # Plus any explicitly allowed headers, title-cased for storage.
    for allowed_key in self.allowed_headers:
        if allowed_key in request.headers:
            new_metadata[allowed_key.title()] = \
                request.headers[allowed_key]

    # Reconcile the expirer queue with the changed X-Delete-At.
    previous_delete_at = int(existing_metadata.get('X-Delete-At') or 0)
    if previous_delete_at != delete_at:
        if delete_at:
            self.delete_at_update('PUT', delete_at, account, container,
                                  obj, request, device, policy)
        if previous_delete_at:
            self.delete_at_update('DELETE', previous_delete_at, account,
                                  container, obj, request, device,
                                  policy)

    disk_file.write_metadata(new_metadata)
    return HTTPAccepted(request=request)
def PUT(self, request):
    """Handle HTTP PUT requests for the Swift on File object server

    Streams the request body into a new DiskFile with HPSS-specific
    options (class-of-service, purge lock), writes checksum xattrs so
    HPSS and Swift agree on the MD5, updates the expirer queue and
    container listing, and creates a convenience account symlink.

    :param request: swob Request object
    :returns: 201 Created on success, or the appropriate error response
    """
    try:
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        req_timestamp = valid_timestamp(request)

        # check swiftonhpss constraints first
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response

        # (HPSS) Shameless copy-paste from ObjectController.PUT and
        # modification, because we have to do certain things like pass in
        # purgelock and class-of-service information that Swift won't know
        # to do and need to do it in a very specific order.
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            # Fixed: keyword was misspelled 'context_type', so the
            # error response was not labeled text/plain.
            return HTTPBadRequest(body='X-Delete-At in past',
                                  request=request,
                                  content_type='text/plain')
        try:
            # None when the transfer is chunked.
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e), request=request,
                                  content_type='text/plain')

        # Try to get DiskFile
        try:
            disk_file = self.get_diskfile(device, partition, account,
                                          container, obj, policy=policy)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)

        try:
            orig_metadata = disk_file.read_metadata()
        except (DiskFileNotExist, DiskFileQuarantined):
            orig_metadata = {}

        # Check for If-None-Match in request
        if request.if_none_match and orig_metadata:
            if '*' in request.if_none_match:
                # File exists already, return 412
                return HTTPPreconditionFailed(request=request)
            if orig_metadata.get('ETag') in request.if_none_match:
                # The current ETag matches, return 412
                return HTTPPreconditionFailed(request=request)

        # Only strictly newer timestamps may overwrite.
        orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
        if orig_timestamp >= req_timestamp:
            return HTTPConflict(
                request=request,
                headers={'X-Backend-Timestamp': orig_timestamp.internal})

        orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0

        # (HPSS) Check for HPSS-specific metadata headers
        cos = request.headers.get('X-Hpss-Class-Of-Service-Id', None)
        purgelock = config_true_value(
            request.headers.get('X-Hpss-Purgelock-Status', 'false'))

        try:
            # Feed DiskFile our HPSS-specific stuff
            with disk_file.create(size=fsize, cos=cos) as writer:
                upload_size = 0

                def timeout_reader():
                    # One bounded read; raises ChunkReadTimeout if the
                    # client stalls.
                    with ChunkReadTimeout(self.client_timeout):
                        return request.environ['wsgi.input'].read(
                            self.network_chunk_size)

                try:
                    for chunk in iter(lambda: timeout_reader(), ''):
                        start_time = time.time()
                        if start_time > upload_expiration:
                            self.logger.increment('PUT.timeouts')
                            return HTTPRequestTimeout(request=request)
                        etag.update(chunk)
                        upload_size = writer.write(chunk)
                        elapsed_time += time.time() - start_time
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)
                if upload_size:
                    self.logger.transfer_rate('PUT.%s.timing' % device,
                                              elapsed_time, upload_size)
                # Fixed: 'if fsize' skipped this check when the client
                # declared a zero-length body; compare whenever a
                # length was given, as the stock PUT does.
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)
                etag = etag.hexdigest()
                if 'etag' in request.headers \
                        and request.headers['etag'].lower() != etag:
                    return HTTPUnprocessableEntity(request=request)

                # Update object metadata
                metadata = {'X-Timestamp': request.timestamp.internal,
                            'Content-Type':
                                request.headers['content-type'],
                            'ETag': etag,
                            'Content-Length': str(upload_size),
                            }
                meta_headers = {header: request.headers[header] for
                                header in request.headers if
                                is_sys_or_user_meta('object', header)}
                metadata.update(meta_headers)
                # Fixed: X-Backend-Replication-Headers is a
                # whitespace-separated string of header names; the old
                # code iterated the raw string character by character,
                # so none of the listed headers were ever copied. Split
                # it and always include self.allowed_headers, matching
                # the stock object-server PUT.
                headers_to_copy = (request.headers.get(
                    'X-Backend-Replication-Headers', '').split() +
                    list(self.allowed_headers))
                for header_key in headers_to_copy:
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = \
                            request.headers[header_key]
                # (HPSS) Write the file, with added options
                writer.put(metadata, purgelock=purgelock)
        except DiskFileNoSpace:
            return HTTPInsufficientStorage(drive=device, request=request)
        except SwiftOnFileSystemIOError:
            return HTTPServiceUnavailable(request=request)

        # FIXME: this stuff really should be handled in DiskFile somehow?
        # we set the hpss checksum in here, so both systems have valid
        # and current checksum metadata

        # (HPSS) Set checksum on file ourselves, if hpssfs won't do it
        # for us.
        data_file = disk_file._data_file
        try:
            xattr.setxattr(data_file, 'system.hpss.hash',
                           "md5:%s" % etag)
        except IOError:
            logging.debug("Could not write ETag to system.hpss.hash,"
                          " trying user.hash.checksum")
            try:
                xattr.setxattr(data_file, 'user.hash.checksum', etag)
                xattr.setxattr(data_file, 'user.hash.algorithm', 'md5')
                xattr.setxattr(data_file, 'user.hash.state', 'Valid')
                xattr.setxattr(data_file, 'user.hash.filesize',
                               str(upload_size))
                xattr.setxattr(data_file, 'user.hash.app',
                               'swiftonhpss')
            except IOError as err:
                raise SwiftOnFileSystemIOError(
                    err.errno,
                    'Could not write MD5 checksum to HPSS filesystem: '
                    '%s' % err.strerror)

        # Update container metadata
        if orig_delete_at != new_delete_at:
            if new_delete_at:
                self.delete_at_update('PUT', new_delete_at, account,
                                      container, obj, request, device,
                                      policy)
            if orig_delete_at:
                self.delete_at_update('DELETE', orig_delete_at, account,
                                      container, obj, request, device,
                                      policy)
        self.container_update('PUT', account, container, obj, request,
                              HeaderKeyDict(
                                  {'x-size': metadata['Content-Length'],
                                   'x-content-type':
                                       metadata['Content-Type'],
                                   'x-timestamp':
                                       metadata['X-Timestamp'],
                                   'x-etag': metadata['ETag']}),
                              device, policy)

        # Create convenience symlink
        try:
            self._object_symlink(request, disk_file._data_file, device,
                                 account)
        except SwiftOnFileSystemOSError:
            logging.debug('could not make account symlink')
            return HTTPServiceUnavailable(request=request)
        return HTTPCreated(request=request, etag=etag)
    except (AlreadyExistsAsFile, AlreadyExistsAsDir):
        # Fixed: split_and_validate_path returns the list of path
        # segments; take the first so 'drive' is the device name, not
        # the whole list.
        device = split_and_validate_path(request, 1, 5, True)[0]
        return HTTPConflict(drive=device, request=request)
def PUT(self, request):
    """Handle HTTP PUT requests for the Swift Object Server.

    Extended PUT protocol used for EC and replication subrequests: the
    proxy may send a multipart-MIME body carrying the object plus a
    metadata footer, and may request a two-phase (multiphase) commit,
    both negotiated through 100-continue response headers.

    :param request: swob Request object
    :returns: 201 Created on success, otherwise an error response
    """
    device, partition, account, container, obj, policy = \
        get_name_and_placement(request, 5, 5, True)
    req_timestamp = valid_timestamp(request)
    error_response = check_object_creation(request, obj)
    if error_response:
        return error_response
    # X-Delete-At must be in the future (0 / absent means "never").
    new_delete_at = int(request.headers.get('X-Delete-At') or 0)
    if new_delete_at and new_delete_at < time.time():
        return HTTPBadRequest(body='X-Delete-At in past', request=request,
                              content_type='text/plain')
    try:
        fsize = request.message_length()
    except ValueError as e:
        return HTTPBadRequest(body=str(e), request=request,
                              content_type='text/plain')
    # In case of multipart-MIME put, the proxy sends a chunked request,
    # but may let us know the real content length so we can verify that
    # we have enough disk space to hold the object.
    if fsize is None:
        fsize = request.headers.get('X-Backend-Obj-Content-Length')
        if fsize is not None:
            try:
                fsize = int(fsize)
            except ValueError as e:
                return HTTPBadRequest(body=str(e), request=request,
                                      content_type='text/plain')
    # SSYNC will include Frag-Index header for subrequests to primary
    # nodes; handoff nodes should 409 subrequests to over-write an
    # existing data fragment until they offloaded the existing fragment
    frag_index = request.headers.get('X-Backend-Ssync-Frag-Index')
    try:
        disk_file = self.get_diskfile(
            device, partition, account, container, obj,
            policy=policy, frag_index=frag_index)
    except DiskFileDeviceUnavailable:
        return HTTPInsufficientStorage(drive=device, request=request)
    try:
        orig_metadata = disk_file.read_metadata()
    except DiskFileXattrNotSupported:
        return HTTPInsufficientStorage(drive=device, request=request)
    except (DiskFileNotExist, DiskFileQuarantined):
        # No prior object; treat as a fresh PUT.
        orig_metadata = {}

    # Checks for If-None-Match
    if request.if_none_match is not None and orig_metadata:
        if '*' in request.if_none_match:
            # File exists already so return 412
            return HTTPPreconditionFailed(request=request)
        if orig_metadata.get('ETag') in request.if_none_match:
            # The current ETag matches, so return 412
            return HTTPPreconditionFailed(request=request)

    # Only strictly newer timestamps may overwrite.
    orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
    if orig_timestamp >= req_timestamp:
        return HTTPConflict(
            request=request,
            headers={'X-Backend-Timestamp': orig_timestamp.internal})
    orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
    upload_expiration = time.time() + self.max_upload_time
    etag = md5()
    elapsed_time = 0
    try:
        with disk_file.create(size=fsize) as writer:
            upload_size = 0

            # If the proxy wants to send us object metadata after the
            # object body, it sets some headers. We have to tell the
            # proxy, in the 100 Continue response, that we're able to
            # parse a multipart MIME document and extract the object and
            # metadata from it. If we don't, then the proxy won't
            # actually send the footer metadata.
            have_metadata_footer = False
            use_multiphase_commit = False
            mime_documents_iter = iter([])
            obj_input = request.environ['wsgi.input']

            hundred_continue_headers = []
            if config_true_value(
                    request.headers.get(
                        'X-Backend-Obj-Multiphase-Commit')):
                use_multiphase_commit = True
                hundred_continue_headers.append(
                    ('X-Obj-Multiphase-Commit', 'yes'))
            if config_true_value(
                    request.headers.get('X-Backend-Obj-Metadata-Footer')):
                have_metadata_footer = True
                hundred_continue_headers.append(
                    ('X-Obj-Metadata-Footer', 'yes'))

            if have_metadata_footer or use_multiphase_commit:
                # Advertise our capabilities back on the 100-continue,
                # then switch to reading MIME documents: the first doc
                # is the object body itself.
                obj_input.set_hundred_continue_response_headers(
                    hundred_continue_headers)
                mime_boundary = request.headers.get(
                    'X-Backend-Obj-Multipart-Mime-Boundary')
                if not mime_boundary:
                    return HTTPBadRequest("no MIME boundary")

                try:
                    with ChunkReadTimeout(self.client_timeout):
                        mime_documents_iter = iter_mime_headers_and_bodies(
                            request.environ['wsgi.input'],
                            mime_boundary, self.network_chunk_size)
                        _junk_hdrs, obj_input = next(mime_documents_iter)
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)

            timeout_reader = self._make_timeout_reader(obj_input)
            try:
                # '' sentinel: Python 2 reads yield str; EOF is ''.
                for chunk in iter(timeout_reader, ''):
                    start_time = time.time()
                    if start_time > upload_expiration:
                        self.logger.increment('PUT.timeouts')
                        return HTTPRequestTimeout(request=request)
                    etag.update(chunk)
                    upload_size = writer.write(chunk)
                    elapsed_time += time.time() - start_time
            except ChunkReadTimeout:
                return HTTPRequestTimeout(request=request)
            if upload_size:
                self.logger.transfer_rate(
                    'PUT.' + device + '.timing', elapsed_time,
                    upload_size)
            # Declared length, if any, must match the bytes received.
            if fsize is not None and fsize != upload_size:
                return HTTPClientDisconnect(request=request)

            footer_meta = {}
            if have_metadata_footer:
                footer_meta = self._read_metadata_footer(
                    mime_documents_iter)

            # Footer etag, when present, overrides the header etag.
            request_etag = (footer_meta.get('etag') or
                            request.headers.get('etag', '')).lower()
            etag = etag.hexdigest()
            if request_etag and request_etag != etag:
                return HTTPUnprocessableEntity(request=request)
            metadata = {
                'X-Timestamp': request.timestamp.internal,
                'Content-Type': request.headers['content-type'],
                'ETag': etag,
                'Content-Length': str(upload_size),
            }
            # Merge user/system metadata from headers, then footers
            # (footer values win by updating last).
            metadata.update(val for val in request.headers.items()
                            if is_sys_or_user_meta('object', val[0]))
            metadata.update(val for val in footer_meta.items()
                            if is_sys_or_user_meta('object', val[0]))
            # Replication subrequests list extra headers to persist in
            # X-Backend-Replication-Headers (whitespace separated).
            headers_to_copy = (
                request.headers.get(
                    'X-Backend-Replication-Headers', '').split() +
                list(self.allowed_headers))
            for header_key in headers_to_copy:
                if header_key in request.headers:
                    header_caps = header_key.title()
                    metadata[header_caps] = request.headers[header_key]
            writer.put(metadata)

            # if the PUT requires a two-phase commit (a data and a commit
            # phase) send the proxy server another 100-continue response
            # to indicate that we are finished writing object data
            if use_multiphase_commit:
                request.environ['wsgi.input'].\
                    send_hundred_continue_response()
                if not self._read_put_commit_message(mime_documents_iter):
                    return HTTPServerError(request=request)
                # got 2nd phase confirmation, write a timestamp.durable
                # state file to indicate a successful PUT
                writer.commit(request.timestamp)

            # Drain any remaining MIME docs from the socket. There
            # shouldn't be any, but we must read the whole request body.
            try:
                while True:
                    with ChunkReadTimeout(self.client_timeout):
                        _junk_hdrs, _junk_body = next(
                            mime_documents_iter)
                    drain(_junk_body, self.network_chunk_size,
                          self.client_timeout)
            except ChunkReadTimeout:
                raise HTTPClientDisconnect()
            except StopIteration:
                # Normal end of the MIME stream.
                pass

    except (DiskFileXattrNotSupported, DiskFileNoSpace):
        return HTTPInsufficientStorage(drive=device, request=request)
    # Keep the expirer queue in sync with the new X-Delete-At value.
    if orig_delete_at != new_delete_at:
        if new_delete_at:
            self.delete_at_update(
                'PUT', new_delete_at, account, container, obj, request,
                device, policy)
        if orig_delete_at:
            self.delete_at_update(
                'DELETE', orig_delete_at, account, container, obj,
                request, device, policy)
    update_headers = HeaderKeyDict({
        'x-size': metadata['Content-Length'],
        'x-content-type': metadata['Content-Type'],
        'x-timestamp': metadata['X-Timestamp'],
        'x-etag': metadata['ETag']})
    # apply any container update header overrides sent with request
    self._check_container_override(update_headers, request.headers)
    self._check_container_override(update_headers, footer_meta)
    self.container_update(
        'PUT', account, container, obj, request, update_headers,
        device, policy)
    return HTTPCreated(request=request, etag=etag)