def manage_ssh(f):
    @functools.wraps(f)
    def wrapped(topology_params, function_params, ssh=None, response=None,
                *args, **kwargs):
        response = HTTPOk()
        response.information_message = ''
        response.execution = {
            HAS_BEEN_EXECUTED: False,
            STEPS: [],
        }
        response.next = {}
        response.datas = []
        response.accepted_commands = []
        response.is_ok = False

        if len(topology_params.aera) != 2:
            remote_server_name = '%s-%s-%s-%s-%s' % (topology_params.appcode,
                                                     topology_params.env,
                                                     topology_params.appcomp,
                                                     topology_params.num_component,
                                                     topology_params.aera)
        else:
            remote_server_name = '%s-%s-%s-%s%s' % (topology_params.appcode,
                                                    topology_params.env,
                                                    topology_params.aera,
                                                    topology_params.appcomp,
                                                    topology_params.num_component)

        try:
            f(topology_params, function_params, connect(remote_server_name),
              response, *args, **kwargs)
        except NetworkError, e:
            # abnormal exit
            app_e = HTTPServiceUnavailable()
            app_e.information_message = e.message
            raise app_e
        except exceptions.SystemExit, e:
            # abnormal exit
            app_e = HTTPServiceUnavailable()
            if e.code == 2:
                app_e.information_message = e.message
            else:
                app_e.information_message = 'bad username (%s) or auth key issue' \
                    % lookup(remote_server_name)
            raise app_e
def localised(globs, request):
    """
    Returns information about the ip_address

    If GeoLite Cities is not installed, raise an error
    """
    if os.path.isfile(_GEOLITE_CITIES):
        gi = geoip.GeoIP(_GEOLITE_CITIES)
        try:
            record = gi.record_by_addr(request.match['ip_address'])
        except geoip.GeoIPError:
            raise HTTPNotImplemented('You can only parse IPv4 addresses')
        if record:
            record = dict(record)
            for key in record.keys():
                if hasattr(record[key], 'decode'):
                    record[key] = record[key].decode('ISO-8859-1').encode(
                        'utf-8')
            return json.dumps(record)
        else:
            return json.dumps({})
    else:
        raise HTTPServiceUnavailable('You must install GeoLite Cities '
                                     ': http://www.maxmind.com/app'
                                     '/geolitecountry')
def request_to_ks(self, req, servers, port):
    """ Routing multiple keystone servers. """
    succ_resps = []
    fail_resps = []
    auth_tokens = self._split_auth_token(req, servers)
    bodies = self._split_body(req, servers)
    for site, token, body in zip(servers, auth_tokens, bodies):
        for node in site:
            parsed = urlparse(self._combinate_ks_url(node, port, req))
            connector = HTTPSConnection if parsed.scheme == 'https' \
                else HTTPConnection
            try:
                with ConnectionTimeout(self.conn_timeout):
                    (host, port) = parsed.netloc.split(':')
                    headers = req.headers
                    if req.headers.has_key('Host'):
                        headers['Host'] = host + ':' + str(port)
                    if token:
                        headers['X-Auth-Token'] = token
                    if req.headers.has_key('Content-Length'):
                        del headers['Content-Length']
                    conn = connector(host, port)
                    conn.request(req.method, parsed.path, body, headers)
                with Timeout(self.timeout):
                    resp = conn.getresponse()
                    if resp.status >= 200 and resp.status <= 300:
                        succ_resps.append(resp)
                        break
                    else:
                        fail_resps.append(resp)
            except ValueError, err:
                fail_resps.append(HTTPPreconditionFailed(request=req))
            except (Exception, TimeoutError), err:
                fail_resps.append(HTTPServiceUnavailable(request=req))
def __call__(self, environ, start_response):
    try:
        return self.app(environ, start_response)
    except TimeoutError:
        if asbool(self.config.get('debug')):
            raise
        else:
            return HTTPServiceUnavailable()(environ, start_response)
def password_reset(self, request, **data):
    """Sends an e-mail for a password reset request."""
    if self.reset is None:
        logger.debug('reset attempted, but no resetcode library installed')
        raise HTTPServiceUnavailable()

    user_id = self.auth.get_user_id(request.user)
    if user_id is None:
        # user not found
        raise HTTPJsonBadRequest(ERROR_INVALID_USER)

    self.auth.get_user_info(request.user, ['mail'])
    if request.user.get('mail') is None:
        raise HTTPJsonBadRequest(ERROR_NO_EMAIL_ADDRESS)

    self._check_captcha(request, data)

    try:
        # the request looks fine, let's generate the reset code
        code = self.reset.generate_reset_code(request.user)

        data = {'host': request.host_url,
                'user_name': request.user['username'],
                'code': code}
        body = render_mako('password_reset_mail.mako', **data)

        sender = request.config['smtp.sender']
        host = request.config['smtp.host']
        port = int(request.config['smtp.port'])
        user = request.config.get('smtp.user')
        password = request.config.get('smtp.password')

        subject = 'Resetting your Services password'
        res, msg = send_email(sender, request.user['mail'], subject, body,
                              host, port, user, password)

        if not res:
            raise HTTPServiceUnavailable(msg)
    except AlreadySentError:
        # backend handled the reset code email. Keep going
        pass

    return text_response('success')
def call(self, request):
    # Match the Request URL to an action
    action = self.match(request)
    try:
        return action(request)
    except ServiceUnavailable, e:
        msg = "Internal storage error: %s" % e
        logger.critical(msg)
        raise HTTPServiceUnavailable(msg)
def handleREST(self, env, start_response):
    """
    Handles routing of ReST requests. This handler also logs all requests.

    :param env: WSGI environment
    :param start_response: WSGI start_response function
    """
    req = Request(env)
    logged_headers = None
    if self.log_headers:
        logged_headers = '\n'.join(
            '%s: %s' % (k, v)
            for k, v in req.headers.items()).replace('"', "#042")
    start_time = time()
    # Figure out how to handle the request
    try:
        if req.method == 'GET' and req.path.startswith('/v1') or \
                req.path.startswith('/auth'):
            handler = self.handle_auth
        elif req.method == 'GET' and req.path.startswith('/token/'):
            handler = self.handle_token
        elif req.method == 'PUT' and req.path.startswith('/account/'):
            handler = self.handle_add_user
        elif req.method == 'POST' and \
                req.path == '/recreate_accounts':
            handler = self.handle_account_recreate
        else:
            return HTTPBadRequest(request=env)(env, start_response)
        response = handler(req)
    except Exception:
        self.logger.exception(
            _('ERROR Unhandled exception in ReST request'))
        return HTTPServiceUnavailable(request=req)(env, start_response)
    trans_time = '%.4f' % (time() - start_time)
    if not response.content_length and response.app_iter and \
            hasattr(response.app_iter, '__len__'):
        response.content_length = sum(map(len, response.app_iter))
    the_request = '%s %s' % (req.method, quote(unquote(req.path)))
    if req.query_string:
        the_request = the_request + '?' + req.query_string
    the_request += ' ' + req.environ['SERVER_PROTOCOL']
    client = req.headers.get('x-cluster-client-ip')
    if not client and 'x-forwarded-for' in req.headers:
        client = req.headers['x-forwarded-for'].split(',')[0].strip()
    if not client:
        client = req.remote_addr
    self.logger.info(
        '%s - - [%s] "%s" %s %s "%s" "%s" - - - - - - - - - "-" "%s" '
        '"%s" %s' % (client,
                     strftime('%d/%b/%Y:%H:%M:%S +0000', gmtime()),
                     the_request, response.status_int,
                     response.content_length or '-',
                     req.referer or '-', req.user_agent or '-',
                     req.remote_addr, logged_headers or '-', trans_time))
    return response(env, start_response)
def __call__(self, controller, environ, context):
    if not self._should_check:
        return self.next_handler(controller, environ, context)

    if self._should_check:
        evolver = context.config.get('tgext.evolve._evolver', None)
        if evolver is None or not evolver.is_locked():
            self._should_check = False
            return self.next_handler(controller, environ, context)

    return HTTPServiceUnavailable(
        detail='System is currently undergoing maintenance')
def _dispatch_request(self, request):
    """Dispatch the request.

    This will dispatch the request either to a special internal handler
    or to one of the configured controller methods.
    """
    # XXX
    # removing the trailing slash - ambiguity on client side
    url = request.path_info.rstrip('/')
    if url != '':
        request.environ['PATH_INFO'] = request.path_info = url

    # the heartbeat page is called
    if url == '/%s' % self.heartbeat_page:
        # the app is shutting down, we want to return a 503
        if self.shutting:
            raise HTTPServiceUnavailable()

        # otherwise we do call the heartbeat page
        if (self.heartbeat_page is not None and
                request.method in ('HEAD', 'GET')):
            return self._heartbeat(request)

    # the debug page is called
    if self.debug_page is not None and url == '/%s' % self.debug_page:
        return self._debug(request)

    # the request must be going to a controller method
    match = self.mapper.routematch(environ=request.environ)

    if match is None:
        # Check whether there is a match on just the path.
        # If not then it's a 404; if so then it's a 405.
        match = self.mapper.routematch(url=request.path_info)
        if match is None:
            return HTTPNotFound()
        else:
            return HTTPMethodNotAllowed()

    match, __ = match

    # if auth is enabled, wrap it around the call to the controller
    if self.auth is None:
        return self._dispatch_request_with_match(request, match)
    else:
        self.auth.check(request, match)
        try:
            response = self._dispatch_request_with_match(request, match)
        except HTTPException, response:
            self.auth.acknowledge(request, response)
            raise
        else:
def delete_password_reset(self, request, **data):
    """Forces a password reset clear"""
    if self.reset is None:
        logger.debug('reset attempted, but no resetcode library installed')
        raise HTTPServiceUnavailable()
    self._check_captcha(request, data)
    self.auth.get_user_id(request.user)
    self.reset.clear_reset_code(request.user)
    log_cef("User requested password reset clear", 9, request.environ,
            self.app.config, request.user.get('username'),
            PASSWD_RESET_CLR)
    return text_response('success')
def fetch_image(self, folder_name, image_url):
    filename, extension, content_type = self.get_details(image_url)
    image_url = image_url.replace(filename, urllib.quote(filename), 10)
    logging.info(image_url)
    fetched_image = urlfetch.fetch(image_url.replace(' ', '', 12))
    if fetched_image.status_code == 500:
        raise HTTPServiceUnavailable()
    content_type = fetched_image.headers.get('content-type', content_type)
    return (folder_name, filename + '-' + uuid.uuid4().hex, extension,
            content_type, fetched_image.content)
def handle_add_user(self, request):
    """
    Handles Rest requests from developers to have a user added.
    If the account specified doesn't exist, it will also be added.
    Currently, updating a user's information (password, admin access)
    must be done by directly updating the sqlite database.

    Valid URL paths:
        * PUT /account/<account-name>/<user-name> - create the account

    Valid headers:
        * X-Auth-User-Key: <password>
        * X-Auth-User-Admin: <true|false>
        * X-Auth-User-Reseller-Admin: <true|false>

    If the HTTP request returns with a 204, then the user was added, and
    the storage url will be available in the X-Storage-Url header.

    :param request: webob.Request object
    """
    try:
        _junk, account_name, user_name = \
            split_path(request.path, minsegs=3)
    except ValueError:
        return HTTPBadRequest()
    create_reseller_admin = \
        request.headers.get('x-auth-user-reseller-admin') == 'true'
    if create_reseller_admin and (
            request.headers.get('X-Auth-Admin-User') != '.super_admin' or
            request.headers.get('X-Auth-Admin-Key') != self.super_admin_key):
        return HTTPUnauthorized(request=request)
    create_account_admin = \
        request.headers.get('x-auth-user-admin') == 'true'
    if create_account_admin and \
            not self.is_account_admin(request, account_name):
        return HTTPForbidden(request=request)
    if 'X-Auth-User-Key' not in request.headers:
        return HTTPBadRequest(body='X-Auth-User-Key is required')
    password = request.headers['x-auth-user-key']
    storage_url = self.create_user(account_name, user_name, password,
                                   create_account_admin,
                                   create_reseller_admin)
    if storage_url == 'already exists':
        return HTTPConflict(body=storage_url)
    if not storage_url:
        return HTTPServiceUnavailable()
    return HTTPNoContent(headers={'x-storage-url': storage_url})
def add_all(application, project_id, control_client,
            loader=service.Loaders.FROM_SERVICE_MANAGEMENT):
    """Adds all endpoints middleware to a wsgi application.

    Sets up application to use all default endpoints middleware.

    Example:

      >>> application = MyWsgiApp()  # an existing WSGI application
      >>>
      >>> # the name of the controlled service
      >>> service_name = 'my-service-name'
      >>>
      >>> # A GCP project with service control enabled
      >>> project_id = 'my-project-id'
      >>>
      >>> # wrap the app for service control
      >>> from endpoints_management.control import wsgi
      >>> control_client = client.Loaders.DEFAULT.load(service_name)
      >>> control_client.start()
      >>> wrapped_app = add_all(application, project_id, control_client)
      >>>
      >>> # now use wrapped_app in place of app

    Args:
       application: the wrapped wsgi application
       project_id: the project_id thats providing service control support
       control_client: the service control client instance
       loader (:class:`endpoints_management.control.service.Loader`): loads
         the service instance that configures this instance's behaviour
    """
    try:
        a_service = loader.load()
        if not a_service:
            raise ValueError(u'No service config loaded.')
    except (ServiceConfigException, ValueError):
        logger.exception(
            u'Failed to load service config, installing server error handler.')
        # This will answer all requests with HTTP 503 Service Unavailable
        return HTTPServiceUnavailable()
    authenticator = _create_authenticator(a_service)

    wrapped_app = Middleware(application, project_id, control_client)
    if authenticator:
        wrapped_app = AuthenticationMiddleware(wrapped_app, authenticator)
    return EnvironmentMiddleware(wrapped_app, a_service)
class CatchAuthError(object):

    def __init__(self, app, retry_after='120'):
        self.app = app
        if isinstance(retry_after, int):
            retry_after = str(retry_after)
        self.retry_after = retry_after

    @wsgify
    def __call__(self, request):
        try:
            return request.get_response(self.app)
        except (HTTPUnauthorized, StorageAuthError), e:
            logger.debug(traceback.format_exc())
            return HTTPUnauthorized(str(e))
        except (ConnectionError, ServerError, HTTPServiceUnavailable), e:
            logger.error(traceback.format_exc())
            return HTTPServiceUnavailable(str(e),
                                          retry_after=self.retry_after)
def __call__(self, request):
    # finding a match
    match = self.mapper.routematch(environ=request.environ)
    if match is None:
        return HTTPNotFound()
    match, __ = match

    hook = self._hooks.get(match['action'])
    if hook is None:
        raise HTTPNotFound('Unknown URL %r' % request.path_info)

    hook.preconditions(request, self.globs)
    function = hook['func']

    # the GET mapping is filled on GET and DELETE requests
    if request.method in ('GET', 'DELETE'):
        params = dict(request.GET)
    else:
        params = {}

    request.match = match

    try:
        result = function(self.globs, request, **params)
    except BackendError:
        err = traceback.format_exc()
        logger.error(err)
        raise HTTPServiceUnavailable(retry_after=self.retry_after)

    if isinstance(result, basestring):
        response = getattr(request, 'response', None)
        if response is None:
            response = Response(result)
        elif isinstance(result, str):
            response.body = result
        else:
            # if it's not str it's unicode, which really shouldn't happen
            response.body = result.encode('utf-8')
    else:
        # result is already a Response
        response = result

    hook.postconditions(response, request, self.globs)
    return response
def get_recommended_nodes(self, volume_type_name, size, count=3,
                          imaging=False, affinity='', force_node=None):
    q = self.get_fill_strategy(volume_type_name, size, count, imaging,
                               affinity, force_node)
    nodes = []
    for node, storage_used in q:
        node._storage_used = storage_used
        nodes.append(node)
    if not nodes:
        if not self.db.query(Node)\
                .filter_by(volume_type_name=volume_type_name,
                           status='ACTIVE').count():
            raise HTTPServiceUnavailable(
                "No nodes for type '%s' are ACTIVE" % volume_type_name)
        raise HTTPInsufficientStorage(
            "No suitable node to place volume of size %s" % size)
    if self.app.fill_strategy != 'deep_fill':
        shuffle(nodes)
    return nodes
def _get_new_cid(client_id):
    tries = 0
    ttl = time.time() + _TTL
    content = ttl, [client_id], _EMPTY, None

    while tries < 100:
        new_cid = generate_cid(cid_len)
        if _cache.get(new_cid) is not None:
            tries += 1
            continue   # already taken

        success = _cache.add(new_cid, content)  # , time=ttl)
        if success:
            break
        tries += 1

    if not success:
        raise HTTPServiceUnavailable()

    return new_cid
def __call__(self, request):
    """Entry point for the WSGI app."""
    # the app is being killed, no more requests please
    if self.killing:
        raise HTTPServiceUnavailable()

    request.server_time = round_time()

    # gets request-specific config
    request.config = self._host_specific(request.host, self.config)

    # pre-hook
    before_headers = self._before_call(request)

    try:
        response = self._dispatch_request(request)
    except HTTPException, response:
        # set before-call headers on all responses
        response.headers.update(before_headers)
        raise
def __init__(self, application, project_id, control_client,
             loader=service.Loaders.FROM_SERVICE_MANAGEMENT,
             disable_threading=False):
    self.service_config = None
    self.background_thread = None
    self.threading_failed = disable_threading
    # This will answer all requests with HTTP 503 Service Unavailable
    self.wsgi_backend = HTTPServiceUnavailable()
    self.application = application
    self.project_id = project_id
    self.control_client = control_client
    self.loader = loader
    self.try_loading()
    self.wrap_app()
    if self.service_config is None:
        self.launch_loading_thread()
def do_password_reset(self, request):
    """Do a password reset."""
    if self.reset is None:
        logger.debug('reset attempted, but no resetcode library installed')
        raise HTTPServiceUnavailable()
    user_name = request.POST.get('username')
    if user_name is not None:
        user_name = extract_username(user_name)

    if request.POST.keys() == ['username']:
        # setting up a password reset
        # XXX add support for captcha here via **data
        request.user = User(user_name)
        try:
            self.password_reset(request)
        except (HTTPServiceUnavailable, HTTPJsonBadRequest), e:
            return render_mako('password_failure.mako', error=e.detail)
        else:
            return render_mako('password_key_sent.mako')

    raise HTTPJsonBadRequest()
def _upload(self, req, image_meta):
    """
    Uploads the payload of the request to a backend store in Glance.
    If the `x-image-meta-store` header is set, Glance will attempt to
    use that scheme; if not, Glance will use the scheme set by the flag
    `default_store` to find the backing store.

    :param req: The WSGI/Webob Request object
    :param image_meta: Mapping of metadata about image

    :raises HTTPConflict if image already exists
    :retval The location where the image was stored
    """
    copy_from = self._copy_from(req)
    if copy_from:
        try:
            image_data, image_size = self._get_from_store(req.context,
                                                          copy_from)
        except Exception as e:
            self._safe_kill(req, image_meta['id'])
            msg = _("Copy from external source failed: %s") % e
            LOG.debug(msg)
            return
        image_meta['size'] = image_size or image_meta['size']
    else:
        try:
            req.get_content_type('application/octet-stream')
        except exception.InvalidContentType:
            self._safe_kill(req, image_meta['id'])
            msg = _("Content-Type must be application/octet-stream")
            LOG.debug(msg)
            raise HTTPBadRequest(explanation=msg)

        image_data = req.body_file

    scheme = req.headers.get('x-image-meta-store', CONF.default_store)

    store = self.get_store_or_400(req, scheme)

    image_id = image_meta['id']
    LOG.debug(_("Setting image %s to status 'saving'"), image_id)
    registry.update_image_metadata(req.context, image_id,
                                   {'status': 'saving'})

    LOG.debug(_("Uploading image data for image %(image_id)s "
                "to %(scheme)s store"), locals())

    try:
        self.notifier.info("image.prepare", redact_loc(image_meta))
        location, size, checksum = store.add(
            image_meta['id'],
            utils.CooperativeReader(image_data),
            image_meta['size'])

        def _kill_mismatched(image_meta, attr, actual):
            supplied = image_meta.get(attr)
            if supplied and supplied != actual:
                msg = _("Supplied %(attr)s (%(supplied)s) and "
                        "%(attr)s generated from uploaded image "
                        "(%(actual)s) did not match. Setting image "
                        "status to 'killed'.") % locals()
                LOG.error(msg)
                self._safe_kill(req, image_id)
                self._initiate_deletion(req, location, image_id)
                raise HTTPBadRequest(explanation=msg,
                                     content_type="text/plain",
                                     request=req)

        # Verify any supplied size/checksum value matches size/checksum
        # returned from store when adding image
        _kill_mismatched(image_meta, 'size', size)
        _kill_mismatched(image_meta, 'checksum', checksum)

        # Update the database with the checksum returned
        # from the backend store
        LOG.debug(_("Updating image %(image_id)s data. "
                    "Checksum set to %(checksum)s, size set "
                    "to %(size)d"), locals())
        update_data = {'checksum': checksum,
                       'size': size}
        image_meta = registry.update_image_metadata(req.context,
                                                    image_id,
                                                    update_data)
        self.notifier.info('image.upload', redact_loc(image_meta))

        return location

    except exception.Duplicate as e:
        msg = _("Attempt to upload duplicate image: %s") % e
        LOG.debug(msg)
        self._safe_kill(req, image_id)
        raise HTTPConflict(explanation=msg, request=req)

    except exception.Forbidden as e:
        msg = _("Forbidden upload attempt: %s") % e
        LOG.debug(msg)
        self._safe_kill(req, image_id)
        raise HTTPForbidden(explanation=msg,
                            request=req,
                            content_type="text/plain")

    except exception.StorageFull as e:
        msg = _("Image storage media is full: %s") % e
        LOG.error(msg)
        self._safe_kill(req, image_id)
        self.notifier.error('image.upload', msg)
        raise HTTPRequestEntityTooLarge(explanation=msg, request=req,
                                        content_type='text/plain')

    except exception.StorageWriteDenied as e:
        msg = _("Insufficient permissions on image storage media: %s") % e
        LOG.error(msg)
        self._safe_kill(req, image_id)
        self.notifier.error('image.upload', msg)
        raise HTTPServiceUnavailable(explanation=msg, request=req,
                                     content_type='text/plain')

    except exception.ImageSizeLimitExceeded as e:
        msg = _("Denying attempt to upload image larger than %d bytes."
                % CONF.image_size_cap)
        LOG.info(msg)
        self._safe_kill(req, image_id)
        raise HTTPRequestEntityTooLarge(explanation=msg, request=req,
                                        content_type='text/plain')

    except HTTPError as e:
        self._safe_kill(req, image_id)
        # NOTE(bcwaldon): Ideally, we would just call 'raise' here,
        # but something in the above function calls is affecting the
        # exception context and we must explicitly re-raise the
        # caught exception.
        raise e

    except Exception as e:
        LOG.exception(_("Failed to upload image"))
        self._safe_kill(req, image_id)
        raise HTTPInternalServerError(request=req)
except exception.StorageFull, e:
    msg = _("Image storage media is full: %s") % e
    LOG.error(msg)
    self._safe_kill(req, image_id)
    self.notifier.error('image.upload', msg)
    raise HTTPRequestEntityTooLarge(explanation=msg, request=req,
                                    content_type='text/plain')
except exception.StorageWriteDenied, e:
    msg = _("Insufficient permissions on image storage media: %s") % e
    LOG.error(msg)
    self._safe_kill(req, image_id)
    self.notifier.error('image.upload', msg)
    raise HTTPServiceUnavailable(explanation=msg, request=req,
                                 content_type='text/plain')
except exception.ImageSizeLimitExceeded, e:
    msg = _("Denying attempt to upload image larger than %d.")
    self._safe_kill(req, image_id)
    raise HTTPBadRequest(explanation=msg % CONF.image_size_cap,
                         request=req,
                         content_type='text/plain')
except HTTPError, e:
    self._safe_kill(req, image_id)
    self.notifier.error('image.upload', e.explanation)
    raise
except Exception, e:
def _check_server(self, request):
    raise HTTPServiceUnavailable()
def create_response_for_authservice_unavailable(self, req, error):
    content_type, detail = self.get_error_body(req, error)
    return HTTPServiceUnavailable(body=detail,
                                  content_type=content_type,
                                  request=req)
def __call__(self):
    """
    :return httplib.HTTP(S)Connection in success, and
            webob.exc.HTTPException in failure
    """
    if self.headers.has_key('content-length'):
        if int(self.headers['content-length']) >= MAX_FILE_SIZE:
            return HTTPRequestEntityTooLarge(request=self.req)
    parsed = urlparse(self.url)
    if self.proxy:
        proxy_parsed = urlparse(self.proxy)
    if self._proxy_request_check(parsed.path):
        host, port = self.split_netloc(proxy_parsed)
        path = self.url
        ssl = True if proxy_parsed.scheme == 'https' else False
    else:
        host, port = self.split_netloc(parsed)
        path = parsed.path
        ssl = True if parsed.scheme == 'https' else False
    self.headers['host'] = '%s:%s' % (host, port)
    if self.method == 'PUT' and len(parsed.path.split('/')) >= 5:
        if self.headers.has_key('content-length') and \
                int(self.headers['content-length']) != 0:
            if not self.headers.has_key('expect'):
                self.headers['expect'] = '100-continue'
        chunked = self.req.headers.get('transfer-encoding')
        if isinstance(self.req.environ['wsgi.input'], str):
            reader = self.req.environ['wsgi.input'].read
            data_source = iter(lambda: reader(self.chunk_size), '')
        else:
            data_source = self.req.environ['wsgi.input']
        bytes_transferred = 0
        try:
            conn = self._connect_put_node(host, port, self.method, path,
                                          headers=self.headers,
                                          query_string=parsed.query,
                                          ssl=ssl)
            if not conn:
                return HTTPServiceUnavailable(request=self.req)
            with ContextPool(1) as pool:
                conn.failed = False
                conn.queue = Queue(10)
                pool.spawn(self._send_file, conn, path)
                while True:
                    with ChunkReadTimeout(self.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            if chunked:
                                conn.queue.put('0\r\n\r\n')
                            break
                        except TypeError, err:
                            self.logger.info('Chunk Read Error: %s' % err)
                            break
                        except Exception, err:
                            self.logger.info('Chunk Read Error: %s' % err)
                            return HTTPServerError(request=self.req)
                    bytes_transferred += len(chunk)
                    if bytes_transferred > MAX_FILE_SIZE:
                        return HTTPRequestEntityTooLarge(request=self.req)
                    if not conn.failed:
                        conn.queue.put('%x\r\n%s\r\n' % (len(chunk), chunk)
                                       if chunked else chunk)
    'Destination': orig_container + '/' + orig_obj}
copy_environ = {'REQUEST_METHOD': 'COPY',
                'swift_versioned_copy': True}
creq = Request.blank(copy_path, headers=copy_headers,
                     environ=copy_environ)
copy_resp = self.COPY(creq)
if is_client_error(copy_resp.status_int):
    # some user error, maybe permissions
    return HTTPPreconditionFailed(request=req)
elif not is_success(copy_resp.status_int):
    # could not copy the data, bail
    return HTTPServiceUnavailable(request=req)
# reset these because the COPY changed them
self.container_name = lcontainer
self.object_name = last_item['name']
new_del_req = Request.blank(copy_path, environ=req.environ)
(container_partition, containers,
 _junk, new_del_req.acl, _junk, _junk) = \
    self.container_info(self.account_name, self.container_name)
new_del_req.path_info = copy_path
req = new_del_req
if 'swift.authorize' in req.environ:
    aresp = req.environ['swift.authorize'](req)
    if aresp:
        return aresp
if not containers:
    return HTTPNotFound(request=req)
def PUT(self, req):
    """HTTP PUT request handler."""
    (container_partition, containers, _junk, req.acl,
     req.environ['swift_sync_key'], object_versions) = \
        self.container_info(self.account_name, self.container_name,
                            account_autocreate=self.app.account_autocreate)
    if 'swift.authorize' in req.environ:
        aresp = req.environ['swift.authorize'](req)
        if aresp:
            return aresp
    if not containers:
        return HTTPNotFound(request=req)
    if 'x-delete-after' in req.headers:
        try:
            x_delete_after = int(req.headers['x-delete-after'])
        except ValueError:
            return HTTPBadRequest(request=req,
                                  content_type='text/plain',
                                  body='Non-integer X-Delete-After')
        req.headers['x-delete-at'] = '%d' % (time.time() + x_delete_after)
    if 'x-delete-at' in req.headers:
        try:
            x_delete_at = int(req.headers['x-delete-at'])
            if x_delete_at < time.time():
                return HTTPBadRequest(body='X-Delete-At in past',
                                      request=req,
                                      content_type='text/plain')
        except ValueError:
            return HTTPBadRequest(request=req, content_type='text/plain',
                                  body='Non-integer X-Delete-At')
        delete_at_container = str(
            x_delete_at /
            self.app.expiring_objects_container_divisor *
            self.app.expiring_objects_container_divisor)
        delete_at_part, delete_at_nodes = \
            self.app.container_ring.get_nodes(
                self.app.expiring_objects_account, delete_at_container)
    else:
        delete_at_part = delete_at_nodes = None
    partition, nodes = self.app.object_ring.get_nodes(
        self.account_name, self.container_name, self.object_name)
    # do a HEAD request for container sync and checking object versions
    if 'x-timestamp' in req.headers or (
            object_versions and not
            req.environ.get('swift_versioned_copy')):
        hreq = Request.blank(req.path_info, headers={'X-Newest': 'True'},
                             environ={'REQUEST_METHOD': 'HEAD'})
        hresp = self.GETorHEAD_base(hreq, _('Object'), partition, nodes,
                                    hreq.path_info, len(nodes))
    # Used by container sync feature
    if 'x-timestamp' in req.headers:
        try:
            req.headers['X-Timestamp'] = \
                normalize_timestamp(float(req.headers['x-timestamp']))
            if hresp.environ and 'swift_x_timestamp' in hresp.environ and \
                    float(hresp.environ['swift_x_timestamp']) >= \
                    float(req.headers['x-timestamp']):
                return HTTPAccepted(request=req)
        except ValueError:
            return HTTPBadRequest(
                request=req, content_type='text/plain',
                body='X-Timestamp should be a UNIX timestamp float value; '
                     'was %r' % req.headers['x-timestamp'])
    else:
        req.headers['X-Timestamp'] = normalize_timestamp(time.time())
    # Sometimes the 'content-type' header exists, but is set to None.
    content_type_manually_set = True
    if not req.headers.get('content-type'):
        guessed_type, _junk = mimetypes.guess_type(req.path_info)
        req.headers['Content-Type'] = guessed_type or \
            'application/octet-stream'
        content_type_manually_set = False
    error_response = check_object_creation(req, self.object_name)
    if error_response:
        return error_response
    if object_versions and not req.environ.get('swift_versioned_copy'):
        is_manifest = 'x-object-manifest' in req.headers or \
                      'x-object-manifest' in hresp.headers
        if hresp.status_int != HTTP_NOT_FOUND and not is_manifest:
            # This is a version manifest and needs to be handled
            # differently. First copy the existing data to a new object,
            # then write the data from this request to the version
            # manifest object.
            lcontainer = object_versions.split('/')[0]
            prefix_len = '%03x' % len(self.object_name)
            lprefix = prefix_len + self.object_name + '/'
            ts_source = hresp.environ.get('swift_x_timestamp')
            if ts_source is None:
                ts_source = time.mktime(time.strptime(
                    hresp.headers['last-modified'],
                    '%a, %d %b %Y %H:%M:%S GMT'))
            new_ts = normalize_timestamp(ts_source)
            vers_obj_name = lprefix + new_ts
            copy_headers = {
                'Destination': '%s/%s' % (lcontainer, vers_obj_name)}
            copy_environ = {'REQUEST_METHOD': 'COPY',
                            'swift_versioned_copy': True}
            copy_req = Request.blank(req.path_info, headers=copy_headers,
                                     environ=copy_environ)
            copy_resp = self.COPY(copy_req)
            if is_client_error(copy_resp.status_int):
                # missing container or bad permissions
                return HTTPPreconditionFailed(request=req)
            elif not is_success(copy_resp.status_int):
                # could not copy the data, bail
                return HTTPServiceUnavailable(request=req)
    reader = req.environ['wsgi.input'].read
    data_source = iter(lambda: reader(self.app.client_chunk_size), '')
    source_header = req.headers.get('X-Copy-From')
    source_resp = None
    if source_header:
        source_header = unquote(source_header)
        acct = req.path_info.split('/', 2)[1]
        if isinstance(acct, unicode):
            acct = acct.encode('utf-8')
        if not source_header.startswith('/'):
            source_header = '/' + source_header
        source_header = '/' + acct + source_header
        try:
            src_container_name, src_obj_name = \
                source_header.split('/', 3)[2:]
        except ValueError:
            return HTTPPreconditionFailed(
                request=req,
                body='X-Copy-From header must be of the form'
                     '<container name>/<object name>')
        source_req = req.copy_get()
        source_req.path_info = source_header
        source_req.headers['X-Newest'] = 'true'
        orig_obj_name = self.object_name
        orig_container_name = self.container_name
        self.object_name = src_obj_name
        self.container_name = src_container_name
        source_resp = self.GET(source_req)
        if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
            return source_resp
        self.object_name = orig_obj_name
        self.container_name = orig_container_name
        new_req = Request.blank(req.path_info,
                                environ=req.environ, headers=req.headers)
        data_source = source_resp.app_iter
        new_req.content_length = source_resp.content_length
        if new_req.content_length is None:
            # This indicates a transfer-encoding: chunked source object,
            # which currently only happens because there are more than
            # CONTAINER_LISTING_LIMIT segments in a segmented object. In
            # this case, we're going to refuse to do the server-side copy.
            return HTTPRequestEntityTooLarge(request=req)
        new_req.etag = source_resp.etag
        # we no longer need the X-Copy-From header
        del new_req.headers['X-Copy-From']
        if not content_type_manually_set:
            new_req.headers['Content-Type'] = \
                source_resp.headers['Content-Type']
        if new_req.headers.get('x-fresh-metadata', 'false').lower() \
                not in TRUE_VALUES:
            for k, v in source_resp.headers.items():
                if k.lower().startswith('x-object-meta-'):
                    new_req.headers[k] = v
            for k, v in req.headers.items():
                if k.lower().startswith('x-object-meta-'):
                    new_req.headers[k] = v
        req = new_req
    node_iter = self.iter_nodes(partition, nodes, self.app.object_ring)
    pile = GreenPile(len(nodes))
    for container in containers:
        nheaders = dict(req.headers.iteritems())
        nheaders['Connection'] = 'close'
        nheaders['X-Container-Host'] = '%(ip)s:%(port)s' % container
        nheaders['X-Container-Partition'] = container_partition
        nheaders['X-Container-Device'] = container['device']
        nheaders['Expect'] = '100-continue'
        if delete_at_nodes:
            node = delete_at_nodes.pop(0)
            nheaders['X-Delete-At-Host'] = '%(ip)s:%(port)s' % node
            nheaders['X-Delete-At-Partition'] = delete_at_part
            nheaders['X-Delete-At-Device'] = node['device']
        pile.spawn(self._connect_put_node, node_iter, partition,
                   req.path_info, nheaders, self.app.logger.thread_locals)
    conns = [conn for conn in pile if conn]
    if len(conns) <= len(nodes) / 2:
        self.app.logger.error(
            _('Object PUT returning 503, %(conns)s/%(nodes)s '
              'required connections'),
            {'conns': len(conns), 'nodes': len(nodes) // 2 + 1})
        return HTTPServiceUnavailable(request=req)
    chunked = req.headers.get('transfer-encoding')
    bytes_transferred = 0
    try:
        with ContextPool(len(nodes)) as pool:
            for conn in conns:
                conn.failed = False
                conn.queue = Queue(self.app.put_queue_depth)
                pool.spawn(self._send_file, conn, req.path)
            while True:
                with ChunkReadTimeout(self.app.client_timeout):
                    try:
                        chunk = next(data_source)
                    except StopIteration:
                        if chunked:
                            [conn.queue.put('0\r\n\r\n') for conn in conns]
                        break
                bytes_transferred += len(chunk)
                if bytes_transferred > MAX_FILE_SIZE:
                    return HTTPRequestEntityTooLarge(request=req)
                for conn in list(conns):
                    if not conn.failed:
                        conn.queue.put('%x\r\n%s\r\n' % (len(chunk), chunk)
                                       if chunked else chunk)
                    else:
                        conns.remove(conn)
                if len(conns) <= len(nodes) / 2:
                    self.app.logger.error(
                        _('Object PUT exceptions during send, '
                          '%(conns)s/%(nodes)s required connections'),
                        {'conns': len(conns),
                         'nodes': len(nodes) / 2 + 1})
                    return HTTPServiceUnavailable(request=req)
            for conn in conns:
                if conn.queue.unfinished_tasks:
                    conn.queue.join()
        conns = [conn for conn in conns if not conn.failed]
    except ChunkReadTimeout, err:
        self.app.logger.warn(
            _('ERROR Client read timeout (%ss)'), err.seconds)
        self.app.logger.increment('client_timeouts')
        return HTTPRequestTimeout(request=req)
class VolumeController(BaseController):

    def index(self, request):
        """
        GET /v1.0/{account_id}/volumes

        List volumes
        """
        q = self.account_query(Volume)
        available_filters = set(['status', 'account_id', 'node_id', 'id',
                                 'restore_of', 'name'])
        filters = dict((k, v) for k, v in request.params.items()
                       if k in available_filters)
        if filters:
            q = q.filter_by(**filters)
        cinder_host = request.params.get('cinder_host')
        if cinder_host:
            q = q.join(Node).filter(Node.cinder_host == cinder_host)
        return Response([dict(r) for r in q.all()])

    def _validate_name(self, params):
        name = params.get('name', self.id)
        if not re.match("^[A-Za-z0-9-]+$", name):
            raise HTTPPreconditionFailed("Invalid name '%s'" % name)
        return name

    def _validate_volume_type(self, params):
        try:
            volume_type_name = params['volume_type_name']
        except KeyError:
            raise HTTPBadRequest("Must specify 'volume_type_name'")
        volume_type = self.db.query(VolumeType).get(volume_type_name)
        if not volume_type or volume_type.status != "ACTIVE":
            raise HTTPPreconditionFailed("Invalid volume type '%s'" %
                                         volume_type_name)
        return volume_type

    def _validate_backup(self, params):
        backup = params.get('backup')
        if not backup:
            return None
        try:
            backup = self.account_query(Backup).filter_by(id=backup).one()
        except NoResultFound:
            raise HTTPPreconditionFailed("No Backup '%s'" % backup)
        if backup.status != 'AVAILABLE':
            raise HTTPPreconditionFailed("Backup '%s' must be AVAILABLE, "
                                         "not '%s'" % (backup.id,
                                                       backup.status))
        return backup

    def _validate_source(self, params):
        source_volume = params.get('source_volume')
        if not source_volume:
            return None
        try:
            source = self.account_query(Volume) \
                .filter_by(id=source_volume) \
                .one()
        except NoResultFound:
            raise HTTPPreconditionFailed("No source '%s'" % source_volume)
        if source.status != 'ACTIVE':
            raise HTTPPreconditionFailed("Source '%s' must be 'ACTIVE', "
                                         "not '%s'" % (source.id,
                                                       source.status))
        if not source.node:
            raise HTTPPreconditionFailed("Source has no node.")
        return source

    def _validate_size(self, params, volume_type, backup=None, source=None):
        try:
            size = int(params['size'])
        except KeyError:
            raise HTTPBadRequest("Must specify 'size' parameter")
        except ValueError:
            raise HTTPPreconditionFailed("'size' parameter must be an "
                                         "integer")
        if size < volume_type.min_size or size > volume_type.max_size:
            raise HTTPPreconditionFailed("'size' parameter must be between "
                                         "%s and %s" %
                                         (volume_type.min_size,
                                          volume_type.max_size))
        if backup:
            if size < backup.size:
                msg = "'size' must be >= backup size: %d" % backup.size
                raise HTTPPreconditionFailed(msg)
        if source:
            if size < source.size:
                msg = "'size' must be >= source volume size: %d" % source.size
                raise HTTPPreconditionFailed(msg)
        return size

    def _validate_force_node(self, params):
        force_node = params.get('force_node')
        if not force_node:
            return None
        try:
            self.db.query(Node).filter(or_(Node.id == force_node,
                                           Node.name == force_node)).one()
        except NoResultFound:
            raise HTTPPreconditionFailed('Invalid force_node: %s' %
                                         force_node)
        return force_node

    def _validate_affinity(self, params):
        affinity = params.get('affinity')
        if not affinity:
            return ''
        try:
            affinity_type, affinity_rule = affinity.split(':')
        except ValueError:
            msg = "Invalid affinity: %s" % affinity
            raise HTTPPreconditionFailed(msg)
        if affinity_type not in ('different_node', 'different_group'):
            msg = "Invalid affinity type: %s" % affinity_type
            raise HTTPPreconditionFailed(msg)
        return affinity

    def _validate_transfer(self, params):
        if 'account_id' in params:
            account = self.db.get_or_create_account(params['account_id'])
            if account.status != 'ACTIVE':
                raise HTTPNotFound('Account is not ACTIVE')
        try:
            volume = self.account_query(Volume).filter_by(id=self.id).one()
        except NoResultFound:
            raise HTTPNotFound("Cannot transfer non-existent volume '%s'" %
                               self.id)

    def _assign_node(self, volume, backup, source, nodes):
        """
        Assigns the new volume to a node.

        :returns: dict, node response on successful placement
        :raises: HTTPError
        """
        request_params = {
            'account': self.account_id,
            'size': volume.size,
            'read_iops': volume.volume_type.read_iops,
            'write_iops': volume.volume_type.write_iops,
        }
        if volume.image_id:
            request_params['image_id'] = volume.image_id
        if backup:
            request_params['backup_source_volume_id'] = backup.volume.name
            request_params['backup_id'] = backup.id
            volume.restore_of = backup.id
        if source:
            request_params['source_volume_id'] = source.name
            request_params['source_host'] = source.node.hostname
            request_params['source_port'] = source.node.port
        last_node_error = None
        for node in nodes:
            volume.node = node
            self.db.commit()  # prevent duplicate/lost volumes
            try:
                path = '/volumes/%s' % volume.name
                return self.node_request(node, 'PUT', path, **request_params)
            except NodeError, e:
                last_node_error = e
                # log server errors and continue
                if (e.code // 100) == 5:
                    logger.error(str(e))
                    continue
                # pass client error up to user, immediately
                break
        if not last_node_error:
            raise HTTPServiceUnavailable(
                "No available storage nodes for type '%s'" %
                volume.volume_type.name)
        volume.status = 'DELETED'
        self.db.commit()  # force commit before wsgi rollback
        # pass last error to user
        raise last_node_error
def __call__(self, request):
    if request.method in ('HEAD',):
        raise HTTPBadRequest('"%s" not supported' % request.method)

    request.server_time = round_time()

    # gets request-specific config
    request.config = self._host_specific(request.host, self.config)

    # pre-hook
    before_headers = self._before_call(request)

    # XXX
    # removing the trailing slash - ambiguity on client side
    url = request.path_info.rstrip('/')
    if url != '':
        request.environ['PATH_INFO'] = request.path_info = url

    if (self.heartbeat_page is not None and
            url == '/%s' % self.heartbeat_page):
        return self._heartbeat(request)

    if self.debug_page is not None and url == '/%s' % self.debug_page:
        return self._debug(request)

    match = self.mapper.routematch(environ=request.environ)
    if match is None:
        return HTTPNotFound()
    match, __ = match

    # authentication control
    if self.auth is not None:
        self.auth.check(request, match)

    function = self._get_function(match['controller'], match['action'])
    if function is None:
        raise HTTPNotFound('Unkown URL %r' % request.path_info)

    # extracting all the info from the headers and the url
    request.sync_info = match

    # the GET mapping is filled on GET and DELETE requests
    if request.method in ('GET', 'DELETE'):
        params = dict(request.GET)
    else:
        params = {}

    try:
        result = function(request, **params)
    except BackendError:
        err = traceback.format_exc()
        logger.error(err)
        raise HTTPServiceUnavailable(retry_after=self.retry_after)

    if isinstance(result, basestring):
        response = getattr(request, 'response', None)
        if response is None:
            response = Response(result)
        elif isinstance(result, str):
            response.body = result
        else:
            # if it's not str it's unicode, which really shouldn't happen
            module = getattr(function, '__module__', 'unknown')
            name = getattr(function, '__name__', 'unknown')
            logger.warn('Unicode response returned from: %s - %s'
                        % (module, name))
            response.unicode_body = result
    else:
        # result is already a Response
        response = result

    # setting up the X-Weave-Timestamp
    response.headers['X-Weave-Timestamp'] = str(request.server_time)
    response.headers.update(before_headers)
    return response
        LOG.debug('Unable to pull account from request path')
        return HTTPNotFound()(environ, start_response)

    try:
        token = environ['HTTP_X_AUTH_TOKEN']
    except KeyError:
        LOG.debug('Unable to pull token from request headers')
        return HTTPUnauthorized()(environ, start_response)

    try:
        LOG.debug('Validate token')
        token_info = self.get_token_info(token, account)
    except InvalidUserToken, e:
        LOG.info('Invalid token (%s)' % e)
        return HTTPUnauthorized()(environ, start_response)
    except Exception:
        LOG.exception('Unable to validate token')
        return HTTPServiceUnavailable()(environ, start_response)

    LOG.info('Token valid')
    headers = self.get_headers(token_info)
    LOG.debug('adding headers -> %r' % headers)
    for header, value in headers.items():
        environ['HTTP_' + header.upper().replace('-', '_')] = value
    return self.app(environ, start_response)


def filter_factory(global_conf, **local_conf):
    """Returns a callable that returns a piece of WSGI middleware."""
    def auth_filter(app):
        return RackAuth(local_conf, app)
    return auth_filter