def get_response_body(data_format, data_dict, error_list):
    """
    Returns a properly formatted response body according to format.

    :params data_format: resulting format
    :params data_dict: generated data about results.
    :params error_list: list of quoted filenames that failed
    """
    if data_format == 'text/plain':
        # One "key: value" line per stat, then the error section.
        pieces = ['%s: %s\n' % (key, data_dict[key])
                  for key in sorted(data_dict)]
        pieces.append('Errors:\n')
        pieces.append('\n'.join('%s, %s' % (name, status)
                                for name, status in error_list))
        return ''.join(pieces)
    if data_format == 'application/json':
        # Note: intentionally mutates the caller's dict, matching the
        # long-standing contract of this helper.
        data_dict['Errors'] = error_list
        return json.dumps(data_dict)
    if data_format.endswith('/xml'):
        pieces = ['<?xml version="1.0" encoding="UTF-8"?>\n<delete>\n']
        for key in sorted(data_dict):
            tag = key.replace(' ', '_').lower()
            pieces.append('<%s>%s</%s>\n' % (tag, data_dict[key], tag))
        pieces.append('<errors>\n')
        pieces.append('\n'.join(
            '<object><name>%s</name><status>%s</status></object>'
            % (saxutils.escape(name), status)
            for name, status in error_list))
        pieces.append('</errors>\n</delete>\n')
        return ''.join(pieces)
    raise HTTPNotAcceptable('Invalid output type')
def get_listing_content_type(req):
    """
    Pick the content type for a listing response.

    A ``format`` query parameter, when present, overrides the request's
    Accept header (unrecognized values fall back to the 'plain' mapping).
    The resulting Accept value is then matched against the supported
    listing types.

    :param req: request object with ``params`` and ``accept`` attributes
    :returns: the chosen content type string
    :raises HTTPNotAcceptable: if none of the supported types match
    """
    requested = req.params.get('format')
    if requested:
        req.accept = _format_map.get(requested.lower(),
                                     _format_map.get('plain'))
    chosen = req.accept.best_match(
        ['text/plain', 'application/json', 'application/xml', 'text/xml'])
    if not chosen:
        raise HTTPNotAcceptable()
    return chosen
def account_listing_content_type(req):
    """
    Figure out the content type of an account-listing response.

    Returns a 2-tuple: (content_type, error). Only one of them will be
    set; the other will be None.
    """
    requested = get_param(req, 'format')
    if requested:
        # The query parameter overrides the Accept header; unknown
        # values fall back to plain text.
        req.accept = FORMAT2CONTENT_TYPE.get(requested.lower(),
                                             FORMAT2CONTENT_TYPE['plain'])
    chosen = req.accept.best_match(
        ['text/plain', 'application/json', 'application/xml', 'text/xml'])
    if chosen:
        return (chosen, None)
    return (None, HTTPNotAcceptable(request=req))
def get_listing_content_type(req):
    """
    Determine the content type to use for an account or container
    listing response.

    :param req: request object
    :returns: content type as a string (e.g. text/plain,
              application/json)
    :raises HTTPNotAcceptable: if the requested content type is not
                               acceptable
    :raises HTTPBadRequest: if the 'format' query param is provided
                            and not valid UTF-8
    """
    fmt_param = get_param(req, 'format')
    if fmt_param:
        # Query parameter wins over the Accept header; anything
        # unrecognized is treated as 'plain'.
        req.accept = FORMAT2CONTENT_TYPE.get(fmt_param.lower(),
                                             FORMAT2CONTENT_TYPE['plain'])
    match = req.accept.best_match(
        ['text/plain', 'application/json', 'application/xml', 'text/xml'])
    if match:
        return match
    raise HTTPNotAcceptable(request=req)
def handle_extract(self, req, compress_type):
    """
    Extract a tar archive from the request body and PUT its members as
    objects (Python 2 era variant — note the old ``except X, err``
    syntax below).

    :params req: a swob Request
    :params compress_type: specifying the compression type of the tar.
                           Accepts '', 'gz, or 'bz2'
    :raises HTTPException: on unhandled errors
    :returns: a swob response to request
    """
    success_count = 0
    failed_files = []
    existing_containers = set()
    # Negotiate the response body format before doing any work.
    out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
    if not out_content_type:
        return HTTPNotAcceptable(request=req)
    # A body is required: either an explicit length or chunked transfer.
    if req.content_length is None and \
            req.headers.get('transfer-encoding', '').lower() != 'chunked':
        return HTTPLengthRequired(request=req)
    try:
        vrs, account, extract_base = req.split_path(2, 3, True)
    except ValueError:
        return HTTPNotFound(request=req)
    extract_base = extract_base or ''
    extract_base = extract_base.rstrip('/')
    try:
        # 'r|' opens the tar in non-seekable streaming mode, which is
        # what a WSGI input stream requires.
        tar = tarfile.open(mode='r|' + compress_type,
                           fileobj=req.body_file)
        while True:
            tar_info = tar.next()
            # Stop at end of archive or once too many members failed.
            if tar_info is None or \
                    len(failed_files) >= self.max_failed_extractions:
                break
            if tar_info.isfile():
                obj_path = tar_info.name
                # Normalize './foo' and leading slashes off the member
                # name before prefixing the optional extract base.
                if obj_path.startswith('./'):
                    obj_path = obj_path[2:]
                obj_path = obj_path.lstrip('/')
                if extract_base:
                    obj_path = extract_base + '/' + obj_path
                if '/' not in obj_path:
                    continue  # ignore base level file
                destination = '/'.join(['', vrs, account, obj_path])
                container = obj_path.split('/', 1)[0]
                if not check_utf8(destination):
                    failed_files.append([
                        quote(destination[:MAX_PATH_LENGTH]),
                        HTTPPreconditionFailed().status])
                    continue
                if tar_info.size > MAX_FILE_SIZE:
                    failed_files.append([
                        quote(destination[:MAX_PATH_LENGTH]),
                        HTTPRequestEntityTooLarge().status])
                    continue
                # Lazily create each container the first time we see it.
                if container not in existing_containers:
                    try:
                        self.create_container(
                            req, '/'.join(['', vrs, account, container]))
                        existing_containers.add(container)
                    except CreateContainerError, err:
                        # Auth failures abort the whole request; other
                        # failures just mark this member as failed.
                        if err.status_int == HTTP_UNAUTHORIZED:
                            return HTTPUnauthorized(request=req)
                        failed_files.append([
                            quote(destination[:MAX_PATH_LENGTH]),
                            err.status])
                        continue
                    except ValueError:
                        failed_files.append([
                            quote(destination[:MAX_PATH_LENGTH]),
                            HTTP_BAD_REQUEST])
                        continue
                    if len(existing_containers) > self.max_containers:
                        return HTTPBadRequest(
                            'More than %d base level containers in tar.'
                            % self.max_containers)
    # NOTE(review): the source chunk ends here — the remainder of this
    # method (the object PUT handling and the except clauses matching
    # the outer ``try:`` above) lies outside this view.
def handle_delete(self, req, objs_to_delete=None,
                  user_agent='BulkDelete', swift_source='BD'):
    """
    Delete a list of objects named in the request body, one subrequest
    per object.

    :params req: a swob Request
    :params objs_to_delete: optional pre-parsed list of object names;
                            when None, names are read from the request
                            via self.get_objs_to_delete
    :raises HTTPException: on unhandled errors
    :returns: a swob Response
    """
    try:
        vrs, account, _junk = req.split_path(2, 3, True)
    except ValueError:
        return HTTPNotFound(request=req)
    incoming_format = req.headers.get('Content-Type')
    if incoming_format and not incoming_format.startswith('text/plain'):
        # For now only accept newline separated object names
        return HTTPNotAcceptable(request=req)
    out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
    if not out_content_type:
        return HTTPNotAcceptable(request=req)
    if objs_to_delete is None:
        objs_to_delete = self.get_objs_to_delete(req)
    failed_files = []
    success_count = not_found_count = 0
    # Escalated to HTTPBadGateway if any backend 5xx is seen below.
    failed_file_response_type = HTTPBadRequest
    for obj_to_delete in objs_to_delete:
        obj_to_delete = obj_to_delete.strip().lstrip('/')
        if not obj_to_delete:
            continue
        delete_path = '/'.join(['', vrs, account, obj_to_delete])
        if not check_utf8(delete_path):
            failed_files.append(
                [quote(delete_path), HTTPPreconditionFailed().status])
            continue
        # Build a DELETE subrequest on a copy of the WSGI environ;
        # the original body stream must not be inherited.
        new_env = req.environ.copy()
        new_env['PATH_INFO'] = delete_path
        del (new_env['wsgi.input'])
        new_env['CONTENT_LENGTH'] = 0
        new_env['HTTP_USER_AGENT'] = \
            '%s %s' % (req.environ.get('HTTP_USER_AGENT'), user_agent)
        new_env['swift.source'] = swift_source
        delete_obj_req = Request.blank(delete_path, new_env)
        resp = delete_obj_req.get_response(self.app)
        if resp.status_int // 100 == 2:
            success_count += 1
        elif resp.status_int == HTTP_NOT_FOUND:
            not_found_count += 1
        elif resp.status_int == HTTP_UNAUTHORIZED:
            # Auth failure aborts the whole bulk request immediately.
            return HTTPUnauthorized(request=req)
        else:
            if resp.status_int // 100 == 5:
                failed_file_response_type = HTTPBadGateway
            failed_files.append([quote(delete_path), resp.status])
    resp_body = get_response_body(
        out_content_type,
        {'Number Deleted': success_count,
         'Number Not Found': not_found_count},
        failed_files)
    # 200 only when something was processed and nothing failed.
    if (success_count or not_found_count) and not failed_files:
        return HTTPOk(resp_body, content_type=out_content_type)
    if failed_files:
        return failed_file_response_type(resp_body,
                                         content_type=out_content_type)
    return HTTPBadRequest('Invalid bulk delete.')
def handle_extract_iter(self, req, compress_type,
                        out_content_type='text/plain'):
    """
    A generator that can be assigned to a swob Response's app_iter
    which, when iterated over, will extract and PUT the objects pulled
    from the request body. Will occasionally yield whitespace while
    request is being processed. When the request is completed will
    yield a response body that can be parsed to determine success. See
    above documentation for details.

    :params req: a swob Request
    :params compress_type: specifying the compression type of the tar.
                           Accepts '', 'gz', or 'bz2'
    :params out_content_type: content type of the final response body
    """
    resp_dict = {'Response Status': HTTPCreated().status,
                 'Response Body': '',
                 'Number Files Created': 0}
    failed_files = []
    last_yield = time()
    # First heartbeat byte: for XML clients, emit the XML prolog so the
    # eventual body parses; otherwise just a space.
    if out_content_type and out_content_type.endswith('/xml'):
        to_yield = b'<?xml version="1.0" encoding="UTF-8"?>\n'
    else:
        to_yield = b' '
    separator = b''
    containers_accessed = set()
    # Let eventlet flush single-byte heartbeats immediately.
    req.environ['eventlet.minimum_write_chunk_size'] = 0
    try:
        if not out_content_type:
            raise HTTPNotAcceptable(request=req)
        if req.content_length is None and \
                req.headers.get('transfer-encoding',
                                '').lower() != 'chunked':
            raise HTTPLengthRequired(request=req)
        try:
            vrs, account, extract_base = req.split_path(2, 3, True)
        except ValueError:
            raise HTTPNotFound(request=req)
        extract_base = extract_base or ''
        extract_base = extract_base.rstrip('/')
        # Streaming ('r|') mode: the WSGI input is not seekable.
        tar = tarfile.open(mode='r|' + compress_type,
                           fileobj=req.body_file)
        failed_response_type = HTTPBadRequest
        containers_created = 0
        while True:
            # Periodic whitespace keeps the client connection alive;
            # once any heartbeat is sent, the final body is separated
            # from it by a blank line.
            if last_yield + self.yield_frequency < time():
                last_yield = time()
                yield to_yield
                to_yield, separator = b' ', b'\r\n\r\n'
            tar_info = tar.next()
            if tar_info is None or \
                    len(failed_files) >= self.max_failed_extractions:
                break
            if tar_info.isfile():
                obj_path = tar_info.name
                if not six.PY2:
                    # tarfile gives surrogate-escaped str on py3;
                    # round-trip to a WSGI-style native string.
                    obj_path = obj_path.encode('utf-8',
                                               'surrogateescape')
                    obj_path = bytes_to_wsgi(obj_path)
                if obj_path.startswith('./'):
                    obj_path = obj_path[2:]
                obj_path = obj_path.lstrip('/')
                if extract_base:
                    obj_path = extract_base + '/' + obj_path
                if '/' not in obj_path:
                    continue  # ignore base level file
                destination = '/'.join(['', vrs, account, obj_path])
                container = obj_path.split('/', 1)[0]
                if not constraints.check_utf8(wsgi_to_str(destination)):
                    failed_files.append([
                        wsgi_quote(obj_path[:self.max_path_length]),
                        HTTPPreconditionFailed().status])
                    continue
                if tar_info.size > constraints.MAX_FILE_SIZE:
                    failed_files.append([
                        wsgi_quote(obj_path[:self.max_path_length]),
                        HTTPRequestEntityTooLarge().status])
                    continue
                container_failure = None
                if container not in containers_accessed:
                    cont_path = '/'.join(['', vrs, account, container])
                    try:
                        if self.create_container(req, cont_path):
                            containers_created += 1
                            if containers_created > self.max_containers:
                                raise HTTPBadRequest(
                                    'More than %d containers to create '
                                    'from tar.' % self.max_containers)
                    except CreateContainerError as err:
                        # the object PUT to this container still may
                        # succeed if acls are set
                        container_failure = [
                            wsgi_quote(cont_path[:self.max_path_length]),
                            err.status]
                        if err.status_int == HTTP_UNAUTHORIZED:
                            raise HTTPUnauthorized(request=req)
                    except ValueError:
                        failed_files.append([
                            wsgi_quote(obj_path[:self.max_path_length]),
                            HTTPBadRequest().status])
                        continue
                tar_file = tar.extractfile(tar_info)
                create_headers = {
                    'Content-Length': tar_info.size,
                    'X-Auth-Token': req.headers.get('X-Auth-Token'),
                }
                # Copy some whitelisted headers to the subrequest
                for k, v in req.headers.items():
                    if ((k.lower() in ('x-delete-at', 'x-delete-after'))
                            or is_user_meta('object', k)):
                        create_headers[k] = v
                create_obj_req = make_subrequest(
                    req.environ, method='PUT',
                    path=wsgi_quote(destination),
                    headers=create_headers,
                    agent='%(orig)s BulkExpand', swift_source='EA')
                # Stream the tar member straight into the PUT body.
                create_obj_req.environ['wsgi.input'] = tar_file
                for pax_key, pax_value in tar_info.pax_headers.items():
                    header_name = pax_key_to_swift_header(pax_key)
                    if header_name:
                        # Both pax_key and pax_value are unicode
                        # strings; the key is already UTF-8 encoded, but
                        # we still have to encode the value.
                        create_obj_req.headers[header_name] = \
                            pax_value.encode("utf-8")
                resp = create_obj_req.get_response(self.app)
                containers_accessed.add(container)
                if resp.is_success:
                    resp_dict['Number Files Created'] += 1
                else:
                    # Surface a deferred container-creation failure only
                    # when the object PUT also failed.
                    if container_failure:
                        failed_files.append(container_failure)
                    if resp.status_int == HTTP_UNAUTHORIZED:
                        failed_files.append([
                            wsgi_quote(obj_path[:self.max_path_length]),
                            HTTPUnauthorized().status])
                        raise HTTPUnauthorized(request=req)
                    if resp.status_int // 100 == 5:
                        failed_response_type = HTTPBadGateway
                    failed_files.append([
                        wsgi_quote(obj_path[:self.max_path_length]),
                        resp.status])
        if failed_files:
            resp_dict['Response Status'] = failed_response_type().status
        elif not resp_dict['Number Files Created']:
            resp_dict['Response Status'] = HTTPBadRequest().status
            resp_dict['Response Body'] = \
                'Invalid Tar File: No Valid Files'
    except HTTPException as err:
        resp_dict['Response Status'] = err.status
        resp_dict['Response Body'] = err.body.decode('utf-8')
    except (tarfile.TarError, zlib.error) as tar_error:
        resp_dict['Response Status'] = HTTPBadRequest().status
        resp_dict['Response Body'] = 'Invalid Tar File: %s' % tar_error
    except Exception:
        self.logger.exception('Error in extract archive.')
        resp_dict['Response Status'] = HTTPServerError().status
    # Final chunk: blank-line separator (if heartbeats were sent)
    # followed by the formatted result body.
    yield separator + get_response_body(
        out_content_type, resp_dict, failed_files, 'extract')
def handle_delete_iter(self, req, objs_to_delete=None,
                       user_agent='BulkDelete', swift_source='BD',
                       out_content_type='text/plain'):
    """
    A generator that can be assigned to a swob Response's app_iter
    which, when iterated over, will delete the objects specified in
    request body. Will occasionally yield whitespace while request is
    being processed. When the request is completed will yield a
    response body that can be parsed to determine success. See above
    documentation for details.

    :params req: a swob Request
    :params objs_to_delete: a list of dictionaries that specifies the
                            (native string) objects to be deleted. If
                            None, uses self.get_objs_to_delete to
                            query request.
    """
    last_yield = time()
    # For XML clients the first heartbeat is the XML prolog so the
    # final body still parses; otherwise it's a single space.
    if out_content_type and out_content_type.endswith('/xml'):
        to_yield = b'<?xml version="1.0" encoding="UTF-8"?>\n'
    else:
        to_yield = b' '
    separator = b''
    failed_files = []
    resp_dict = {'Response Status': HTTPOk().status,
                 'Response Body': '',
                 'Number Deleted': 0,
                 'Number Not Found': 0}
    # Let eventlet flush single-byte heartbeats immediately.
    req.environ['eventlet.minimum_write_chunk_size'] = 0
    try:
        if not out_content_type:
            raise HTTPNotAcceptable(request=req)
        try:
            vrs, account, _junk = req.split_path(2, 3, True)
        except ValueError:
            raise HTTPNotFound(request=req)
        vrs = wsgi_to_str(vrs)
        account = wsgi_to_str(account)
        incoming_format = req.headers.get('Content-Type')
        if incoming_format and \
                not incoming_format.startswith('text/plain'):
            # For now only accept newline separated object names
            raise HTTPNotAcceptable(request=req)
        if objs_to_delete is None:
            objs_to_delete = self.get_objs_to_delete(req)
        # Mutable holder so _process_delete can escalate the failure
        # response type (e.g. to HTTPBadGateway) from inside a helper.
        failed_file_response = {'type': HTTPBadRequest}

        def delete_filter(predicate, objs_to_delete):
            # Yield (name, path, version_id) for entries matching
            # ``predicate``; entries carrying a pre-recorded error are
            # tallied/failed here instead of being yielded.
            for obj_to_delete in objs_to_delete:
                obj_name = obj_to_delete['name']
                if not obj_name:
                    continue
                if not predicate(obj_name):
                    continue
                if obj_to_delete.get('error'):
                    if obj_to_delete['error']['code'] == HTTP_NOT_FOUND:
                        resp_dict['Number Not Found'] += 1
                    else:
                        failed_files.append([
                            wsgi_quote(str_to_wsgi(obj_name)),
                            obj_to_delete['error']['message']])
                    continue
                delete_path = '/'.join(
                    ['', vrs, account, obj_name.lstrip('/')])
                if not constraints.check_utf8(delete_path):
                    failed_files.append([
                        wsgi_quote(str_to_wsgi(obj_name)),
                        HTTPPreconditionFailed().status])
                    continue
                yield (obj_name, delete_path,
                       obj_to_delete.get('version_id'))

        def objs_then_containers(objs_to_delete):
            # process all objects first
            yield delete_filter(lambda name: '/' in name.strip('/'),
                                objs_to_delete)
            # followed by containers
            yield delete_filter(lambda name: '/' not in name.strip('/'),
                                objs_to_delete)

        def do_delete(obj_name, delete_path, version_id):
            # One DELETE subrequest; the trailing 0 is the retry count.
            delete_obj_req = make_subrequest(
                req.environ, method='DELETE',
                path=wsgi_quote(str_to_wsgi(delete_path)),
                headers={'X-Auth-Token':
                         req.headers.get('X-Auth-Token')},
                body='', agent='%(orig)s ' + user_agent,
                swift_source=swift_source)
            if version_id is None:
                delete_obj_req.params = {}
            else:
                delete_obj_req.params = {'version-id': version_id}
            return (delete_obj_req.get_response(self.app), obj_name, 0)

        with StreamingPile(self.delete_concurrency) as pile:
            for names_to_delete in objs_then_containers(objs_to_delete):
                for resp, obj_name, retry in pile.asyncstarmap(
                        do_delete, names_to_delete):
                    if last_yield + self.yield_frequency < time():
                        last_yield = time()
                        yield to_yield
                        to_yield, separator = b' ', b'\r\n\r\n'
                    self._process_delete(resp, pile, obj_name,
                                         resp_dict, failed_files,
                                         failed_file_response, retry)
                    if len(failed_files) >= self.max_failed_deletes:
                        # Abort, but drain off the in-progress deletes
                        for resp, obj_name, retry in pile:
                            if last_yield + self.yield_frequency \
                                    < time():
                                last_yield = time()
                                yield to_yield
                                to_yield, separator = b' ', b'\r\n\r\n'
                            # Don't pass in the pile, as we shouldn't
                            # retry
                            self._process_delete(
                                resp, None, obj_name, resp_dict,
                                failed_files, failed_file_response,
                                retry)
                        msg = 'Max delete failures exceeded'
                        raise HTTPBadRequest(msg)
        if failed_files:
            resp_dict['Response Status'] = \
                failed_file_response['type']().status
        elif not (resp_dict['Number Deleted'] or
                  resp_dict['Number Not Found']):
            resp_dict['Response Status'] = HTTPBadRequest().status
            resp_dict['Response Body'] = 'Invalid bulk delete.'
    except HTTPException as err:
        resp_dict['Response Status'] = err.status
        resp_dict['Response Body'] = err.body.decode('utf-8')
    except Exception:
        self.logger.exception('Error in bulk delete.')
        resp_dict['Response Status'] = HTTPServerError().status
    # Final chunk: separator (if heartbeats were sent) + result body.
    yield separator + get_response_body(out_content_type, resp_dict,
                                        failed_files, 'delete')
def handle_extract_iter(self, req, compress_type,
                        out_content_type='text/plain'):
    """
    A generator that can be assigned to a swob Response's app_iter
    which, when iterated over, will extract and PUT the objects pulled
    from the request body. Will occasionally yield whitespace while
    request is being processed. When the request is completed will
    yield a response body that can be parsed to determine success. See
    above documentation for details.

    :params req: a swob Request
    :params compress_type: specifying the compression type of the tar.
                           Accepts '', 'gz', or 'bz2'
    :params out_content_type: content type of the final response body
    """
    resp_dict = {'Response Status': HTTPCreated().status,
                 'Response Body': '',
                 'Number Files Created': 0}
    failed_files = []
    last_yield = time()
    separator = ''
    containers_accessed = set()
    try:
        if not out_content_type:
            raise HTTPNotAcceptable(request=req)
        # Emit the XML prolog up front so the streamed body parses.
        if out_content_type.endswith('/xml'):
            yield '<?xml version="1.0" encoding="UTF-8"?>\n'
        if req.content_length is None and \
                req.headers.get('transfer-encoding',
                                '').lower() != 'chunked':
            raise HTTPLengthRequired(request=req)
        try:
            vrs, account, extract_base = req.split_path(2, 3, True)
        except ValueError:
            raise HTTPNotFound(request=req)
        extract_base = extract_base or ''
        extract_base = extract_base.rstrip('/')
        # Streaming ('r|') mode: the WSGI input is not seekable.
        tar = tarfile.open(mode='r|' + compress_type,
                           fileobj=req.body_file)
        failed_response_type = HTTPBadRequest
        # Let eventlet flush single-byte heartbeats immediately.
        req.environ['eventlet.minimum_write_chunk_size'] = 0
        containers_created = 0
        while True:
            # Heartbeat whitespace keeps the connection alive; once one
            # is sent, the final body gets a blank-line separator.
            if last_yield + self.yield_frequency < time():
                separator = '\r\n\r\n'
                last_yield = time()
                yield ' '
            tar_info = tar.next()
            if tar_info is None or \
                    len(failed_files) >= self.max_failed_extractions:
                break
            if tar_info.isfile():
                obj_path = tar_info.name
                # Normalize './foo' and leading slashes before applying
                # the optional extract base prefix.
                if obj_path.startswith('./'):
                    obj_path = obj_path[2:]
                obj_path = obj_path.lstrip('/')
                if extract_base:
                    obj_path = extract_base + '/' + obj_path
                if '/' not in obj_path:
                    continue  # ignore base level file
                destination = '/'.join(
                    ['', vrs, account, obj_path])
                container = obj_path.split('/', 1)[0]
                if not check_utf8(destination):
                    failed_files.append(
                        [quote(obj_path[:MAX_PATH_LENGTH]),
                         HTTPPreconditionFailed().status])
                    continue
                if tar_info.size > MAX_FILE_SIZE:
                    failed_files.append([
                        quote(obj_path[:MAX_PATH_LENGTH]),
                        HTTPRequestEntityTooLarge().status])
                    continue
                container_failure = None
                if container not in containers_accessed:
                    cont_path = '/'.join(['', vrs, account, container])
                    try:
                        if self.create_container(req, cont_path):
                            containers_created += 1
                            if containers_created > self.max_containers:
                                raise HTTPBadRequest(
                                    'More than %d containers to create '
                                    'from tar.' % self.max_containers)
                    except CreateContainerError as err:
                        # the object PUT to this container still may
                        # succeed if acls are set
                        container_failure = [
                            quote(cont_path[:MAX_PATH_LENGTH]),
                            err.status]
                        if err.status_int == HTTP_UNAUTHORIZED:
                            raise HTTPUnauthorized(request=req)
                    except ValueError:
                        failed_files.append([
                            quote(obj_path[:MAX_PATH_LENGTH]),
                            HTTPBadRequest().status])
                        continue
                tar_file = tar.extractfile(tar_info)
                # Build the PUT subrequest, streaming the tar member
                # directly as its body.
                new_env = req.environ.copy()
                new_env['REQUEST_METHOD'] = 'PUT'
                new_env['wsgi.input'] = tar_file
                new_env['PATH_INFO'] = destination
                new_env['CONTENT_LENGTH'] = tar_info.size
                new_env['swift.source'] = 'EA'
                new_env['HTTP_USER_AGENT'] = \
                    '%s BulkExpand' % req.environ.get('HTTP_USER_AGENT')
                create_obj_req = Request.blank(destination, new_env)
                resp = create_obj_req.get_response(self.app)
                containers_accessed.add(container)
                if resp.is_success:
                    resp_dict['Number Files Created'] += 1
                else:
                    # Surface the deferred container-creation failure
                    # only when the object PUT also failed.
                    if container_failure:
                        failed_files.append(container_failure)
                    if resp.status_int == HTTP_UNAUTHORIZED:
                        failed_files.append([
                            quote(obj_path[:MAX_PATH_LENGTH]),
                            HTTPUnauthorized().status])
                        raise HTTPUnauthorized(request=req)
                    if resp.status_int // 100 == 5:
                        failed_response_type = HTTPBadGateway
                    failed_files.append([
                        quote(obj_path[:MAX_PATH_LENGTH]),
                        resp.status])
        if failed_files:
            resp_dict['Response Status'] = failed_response_type().status
        elif not resp_dict['Number Files Created']:
            resp_dict['Response Status'] = HTTPBadRequest().status
            resp_dict['Response Body'] = \
                'Invalid Tar File: No Valid Files'
    except HTTPException as err:
        resp_dict['Response Status'] = err.status
        resp_dict['Response Body'] = err.body
    except (tarfile.TarError, zlib.error) as tar_error:
        resp_dict['Response Status'] = HTTPBadRequest().status
        resp_dict['Response Body'] = 'Invalid Tar File: %s' % tar_error
    except Exception:
        self.logger.exception('Error in extract archive.')
        resp_dict['Response Status'] = HTTPServerError().status
    yield separator + get_response_body(
        out_content_type, resp_dict, failed_files)
def handle_delete_iter(self, req, objs_to_delete=None,
                       user_agent='BulkDelete', swift_source='BD',
                       out_content_type='text/plain'):
    """
    A generator that can be assigned to a swob Response's app_iter
    which, when iterated over, will delete the objects specified in
    request body. Will occasionally yield whitespace while request is
    being processed. When the request is completed will yield a
    response body that can be parsed to determine success. See above
    documentation for details.

    :params req: a swob Request
    :params objs_to_delete: a list of dictionaries that specifies the
                            objects to be deleted. If None, uses
                            self.get_objs_to_delete to query request.
    """
    last_yield = time()
    separator = ''
    failed_files = []
    resp_dict = {'Response Status': HTTPOk().status,
                 'Response Body': '',
                 'Number Deleted': 0,
                 'Number Not Found': 0}
    try:
        if not out_content_type:
            raise HTTPNotAcceptable(request=req)
        # Emit the XML prolog up front so the streamed body parses.
        if out_content_type.endswith('/xml'):
            yield '<?xml version="1.0" encoding="UTF-8"?>\n'
        try:
            vrs, account, _junk = req.split_path(2, 3, True)
        except ValueError:
            raise HTTPNotFound(request=req)
        incoming_format = req.headers.get('Content-Type')
        if incoming_format and \
                not incoming_format.startswith('text/plain'):
            # For now only accept newline separated object names
            raise HTTPNotAcceptable(request=req)
        if objs_to_delete is None:
            objs_to_delete = self.get_objs_to_delete(req)
        # Escalated to HTTPBadGateway if any backend 5xx is seen below.
        failed_file_response_type = HTTPBadRequest
        # Let eventlet flush single-byte heartbeats immediately.
        req.environ['eventlet.minimum_write_chunk_size'] = 0
        for obj_to_delete in objs_to_delete:
            # Heartbeat whitespace keeps the connection alive; once one
            # is sent, the final body gets a blank-line separator.
            if last_yield + self.yield_frequency < time():
                separator = '\r\n\r\n'
                last_yield = time()
                yield ' '
            obj_name = obj_to_delete['name']
            if not obj_name:
                continue
            if len(failed_files) >= self.max_failed_deletes:
                raise HTTPBadRequest('Max delete failures exceeded')
            # Entries that arrived with a pre-recorded error are
            # tallied here instead of generating a subrequest.
            if obj_to_delete.get('error'):
                if obj_to_delete['error']['code'] == HTTP_NOT_FOUND:
                    resp_dict['Number Not Found'] += 1
                else:
                    failed_files.append(
                        [quote(obj_name),
                         obj_to_delete['error']['message']])
                continue
            delete_path = '/'.join(['', vrs, account,
                                    obj_name.lstrip('/')])
            if not check_utf8(delete_path):
                failed_files.append([quote(obj_name),
                                     HTTPPreconditionFailed().status])
                continue
            # DELETE subrequest on a copy of the WSGI environ; the
            # original body stream must not be inherited.
            new_env = req.environ.copy()
            new_env['PATH_INFO'] = delete_path
            del(new_env['wsgi.input'])
            new_env['CONTENT_LENGTH'] = 0
            new_env['HTTP_USER_AGENT'] = \
                '%s %s' % (req.environ.get('HTTP_USER_AGENT'),
                           user_agent)
            new_env['swift.source'] = swift_source
            delete_obj_req = Request.blank(delete_path, new_env)
            resp = delete_obj_req.get_response(self.app)
            if resp.status_int // 100 == 2:
                resp_dict['Number Deleted'] += 1
            elif resp.status_int == HTTP_NOT_FOUND:
                resp_dict['Number Not Found'] += 1
            elif resp.status_int == HTTP_UNAUTHORIZED:
                failed_files.append([quote(obj_name),
                                     HTTPUnauthorized().status])
            else:
                if resp.status_int // 100 == 5:
                    failed_file_response_type = HTTPBadGateway
                failed_files.append([quote(obj_name), resp.status])
        if failed_files:
            resp_dict['Response Status'] = \
                failed_file_response_type().status
        elif not (resp_dict['Number Deleted'] or
                  resp_dict['Number Not Found']):
            resp_dict['Response Status'] = HTTPBadRequest().status
            resp_dict['Response Body'] = 'Invalid bulk delete.'
    except HTTPException as err:
        resp_dict['Response Status'] = err.status
        resp_dict['Response Body'] = err.body
    except Exception:
        self.logger.exception('Error in bulk delete.')
        resp_dict['Response Status'] = HTTPServerError().status
    yield separator + get_response_body(out_content_type, resp_dict,
                                        failed_files)
# NOTE(review): this chunk begins mid-method — the opening of the
# enclosing handler (presumably a container-server HEAD, given the
# headers built below — TODO confirm) and the start of its `headers`
# dict literal are outside this view.
        'X-Container-Bytes-Used': info['bytes_used'],
        'X-Timestamp': info['created_at'],
        'X-PUT-Timestamp': info['put_timestamp'],
    }
    # Merge in persisted metadata, keeping only non-empty values that
    # are either whitelisted or user container metadata.
    headers.update(
        (key, value)
        for key, (value, timestamp) in broker.metadata.iteritems()
        if value != '' and (key.lower() in self.save_headers or
                            key.lower().startswith('x-container-meta-')))
    # 'format' query param overrides the Accept header for the
    # advertised Content-Type of a would-be listing.
    if get_param(req, 'format'):
        req.accept = FORMAT2CONTENT_TYPE.get(
            get_param(req, 'format').lower(),
            FORMAT2CONTENT_TYPE['plain'])
    headers['Content-Type'] = req.accept.best_match(
        ['text/plain', 'application/json', 'application/xml',
         'text/xml'])
    if not headers['Content-Type']:
        return HTTPNotAcceptable(request=req)
    return HTTPNoContent(request=req, headers=headers, charset='utf-8')

@public
@timing_stats
def GET(self, req):
    """Handle HTTP GET request."""
    try:
        # Expect /<drive>/<partition>/<account>/<container>[/<obj>]
        drive, part, account, container, obj = split_path(
            unquote(req.path), 4, 5, True)
        validate_device_partition(drive, part)
    except ValueError, err:  # Python 2 except syntax (legacy chunk)
        return HTTPBadRequest(body=str(err), content_type='text/plain',
                              request=req)
    if self.mount_check and not check_mount(self.root, drive):
    # NOTE(review): the source chunk ends here — the remainder of this
    # GET handler is outside this view.
def handle_extract_iter(self, req, compress_type,
                        out_content_type='text/plain'):
    """
    A generator that can be assigned to a swob Response's app_iter
    which, when iterated over, will extract and PUT the objects pulled
    from the request body. Will occasionally yield whitespace while
    request is being processed. When the request is completed will
    yield a response body that can be parsed to determine success. See
    above documentation for details.

    (Python 2 era variant — note the old ``except X, err`` syntax.)

    :params req: a swob Request
    :params compress_type: specifying the compression type of the tar.
                           Accepts '', 'gz', or 'bz2'
    """
    resp_dict = {'Response Status': HTTPCreated().status,
                 'Response Body': '',
                 'Number Files Created': 0}
    failed_files = []
    last_yield = time()
    separator = ''
    containers_accessed = set()
    try:
        if not out_content_type:
            raise HTTPNotAcceptable(request=req)
        # Emit the XML prolog up front so the streamed body parses.
        if out_content_type.endswith('/xml'):
            yield '<?xml version="1.0" encoding="UTF-8"?>\n'
        if req.content_length is None and \
                req.headers.get('transfer-encoding',
                                '').lower() != 'chunked':
            raise HTTPLengthRequired(request=req)
        try:
            vrs, account, extract_base = req.split_path(2, 3, True)
        except ValueError:
            raise HTTPNotFound(request=req)
        extract_base = extract_base or ''
        extract_base = extract_base.rstrip('/')
        # Streaming ('r|') mode: the WSGI input is not seekable.
        tar = tarfile.open(mode='r|' + compress_type,
                           fileobj=req.body_file)
        failed_response_type = HTTPBadRequest
        # Let eventlet flush single-byte heartbeats immediately.
        req.environ['eventlet.minimum_write_chunk_size'] = 0
        containers_created = 0
        while True:
            # Heartbeat whitespace keeps the connection alive; once one
            # is sent, the final body gets a blank-line separator.
            if last_yield + self.yield_frequency < time():
                separator = '\r\n\r\n'
                last_yield = time()
                yield ' '
            tar_info = tar.next()
            if tar_info is None or \
                    len(failed_files) >= self.max_failed_extractions:
                break
            if tar_info.isfile():
                obj_path = tar_info.name
                if obj_path.startswith('./'):
                    obj_path = obj_path[2:]
                obj_path = obj_path.lstrip('/')
                if extract_base:
                    obj_path = extract_base + '/' + obj_path
                if '/' not in obj_path:
                    continue  # ignore base level file
                destination = '/'.join(['', vrs, account, obj_path])
                container = obj_path.split('/', 1)[0]
                if not check_utf8(destination):
                    failed_files.append([
                        quote(obj_path[:MAX_PATH_LENGTH]),
                        HTTPPreconditionFailed().status])
                    continue
                if tar_info.size > MAX_FILE_SIZE:
                    failed_files.append([
                        quote(obj_path[:MAX_PATH_LENGTH]),
                        HTTPRequestEntityTooLarge().status])
                    continue
                container_failure = None
                if container not in containers_accessed:
                    cont_path = '/'.join(['', vrs, account, container])
                    try:
                        if self.create_container(req, cont_path):
                            containers_created += 1
                            if containers_created > self.max_containers:
                                raise HTTPBadRequest(
                                    'More than %d containers to create '
                                    'from tar.' % self.max_containers)
                    except CreateContainerError, err:
                        # the object PUT to this container still may
                        # succeed if acls are set
                        container_failure = [
                            quote(cont_path[:MAX_PATH_LENGTH]),
                            err.status]
                        if err.status_int == HTTP_UNAUTHORIZED:
                            raise HTTPUnauthorized(request=req)
                    except ValueError:
                        failed_files.append([
                            quote(obj_path[:MAX_PATH_LENGTH]),
                            HTTPBadRequest().status])
                        continue
    # NOTE(review): the source chunk ends here — the remainder of this
    # method (the object PUT handling, result accounting, and the
    # except clauses matching the outer ``try:``) is outside this view.
def _list_versions(self, req, start_response, location):
    """
    Build a versions listing by proxying a pre-authed GET to the
    hidden versions location and rewriting the result.

    :param req: the original (WSGI) request
    :param start_response: WSGI start_response callable
    :param location: container holding the version objects
    :returns: a WSGI app_iter (list of body bytes)
    """
    # Only supports JSON listings
    req.environ['swift.format_listing'] = False
    if not req.accept.best_match(['application/json']):
        raise HTTPNotAcceptable(request=req)

    params = req.params
    # FIXME(FVE) this is probably not working with oio-sds backend
    if 'version_marker' in params:
        if 'marker' not in params:
            raise HTTPBadRequest('version_marker param requires marker')
        if params['version_marker'] != 'null':
            try:
                ts = Timestamp(params.pop('version_marker'))
            except ValueError:
                raise HTTPBadRequest('invalid version_marker param')
            # Fold the version marker into the object-name marker used
            # by the backing container.
            params['marker'] = self._build_versions_object_name(
                params['marker'], ts)

    delim = params.get('delimiter', '')
    # Exclude the set of chars used in version_id from user delimiters
    if set(delim).intersection('0123456789.%s' % RESERVED_STR):
        raise HTTPBadRequest('invalid delimiter param')

    null_listing = []
    subdir_set = set()

    account = req.split_path(3, 3, True)[1]
    # Pre-authed subrequest so the reserved-name versions container is
    # reachable regardless of the user's ACLs.
    versions_req = make_pre_authed_request(
        req.environ, method='GET', swift_source='OV',
        path=wsgi_quote('/v1/%s/%s' % (account, location)),
        headers={'X-Backend-Allow-Reserved-Names': 'true'},
    )
    versions_req.environ['oio.query'] = {'versions': True}
    # NB: no end_marker support (yet)
    versions_req.params = {
        k: params.get(k, '')
        for k in ('prefix', 'marker', 'limit', 'delimiter',
                  'reverse', 'format')}
    versions_resp = versions_req.get_response(self.app)

    if versions_resp.status_int == HTTP_NOT_FOUND:
        raise versions_resp
    elif is_success(versions_resp.status_int):
        try:
            listing = json.loads(versions_resp.body)
        except ValueError:
            # Not JSON after all — pass the body through untouched.
            app_resp = [versions_resp.body]
        else:
            versions_listing = []
            for item in listing:
                if 'subdir' in item:
                    subdir_set.add(item['subdir'])
                else:
                    item['version_id'] = str(item.get('version',
                                                      'null'))
                    versions_listing.append(item)
                    # Delete markers report the well-known empty-string
                    # MD5 as their hash.
                    if (item['content_type'] ==
                            DELETE_MARKER_CONTENT_TYPE):
                        item['hash'] = MD5_OF_EMPTY_STRING

            subdir_listing = [{'subdir': s} for s in subdir_set]
            broken_listing = []
            limit = constrain_req_limit(req, CONTAINER_LISTING_LIMIT)
            body = build_listing(
                null_listing, versions_listing,
                subdir_listing, broken_listing,
                reverse=config_true_value(params.get('reverse', 'no')),
                limit=limit,
            )
            self.update_content_length(len(body))
            app_resp = [body]
    else:
        # Error responses are forwarded verbatim.
        return versions_resp(versions_req.environ, start_response)

    start_response(self._response_status, self._response_headers,
                   self._response_exc_info)
    return app_resp