def run_server():
    wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
    # Turn off logging requests by the underlying WSGI software.
    wsgi.HttpProtocol.log_request = lambda *a: None
    # Redirect logging other messages by the underlying WSGI software.
    wsgi.HttpProtocol.log_message = \
        lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a)
    wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60)
    eventlet.hubs.use_hub(get_hub())
    eventlet.patcher.monkey_patch(all=False, socket=True)
    eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no'))
    eventlet.debug.hub_exceptions(eventlet_debug)
    app = loadapp('config:%s' % conf_file,
                  global_conf={'log_name': log_name})
    pool = GreenPool(size=1024)
    try:
        wsgi.server(sock, app, NullLogger(), custom_pool=pool)
    except socket.error, err:
        if err[0] != errno.EINVAL:
            raise
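
# In the full source, run_server() is a closure: conf, conf_file, sock, logger
# and log_name come from an enclosing launcher function rather than module
# scope. A minimal, hypothetical sketch of that arrangement (the bind address,
# port and backlog are illustrative assumptions, not values from this code):
def _example_launcher(conf_file, conf, logger, log_name):
    # Listen socket created before dropping into the server loop.
    sock = eventlet.listen(('0.0.0.0', 8080), backlog=4096)

    def run_server():
        pass  # body as defined above, closing over conf, sock, logger, ...

    run_server()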
    try:
        conf = appconfig('config:%s' % conf_file, name=app_section)
    except Exception, e:
        raise ConfigFileError("Error trying to load config %s: %s" %
                              (conf_file, e))

    # pre-configure logger
    log_name = conf.get('log_name', app_section)
    if 'logger' in kwargs:
        logger = kwargs.pop('logger')
    else:
        logger = get_logger(conf, log_name,
                            log_to_console=kwargs.pop('verbose', False),
                            log_route='wsgi')

    # disable fallocate if desired
    if config_true_value(conf.get('disable_fallocate', 'no')):
        disable_fallocate()

    monkey_patch_mimetools()
    app = loadapp('config:%s' % conf_file,
                  global_conf={'log_name': log_name})
    return (app, conf, logger, log_name)


class WSGIContext(object):
    """
    Provides context (scope) for a middleware filter to access the WSGI
    start_response results, such as the request status and headers.
    """
    def __init__(self, wsgi_app):
        self.app = wsgi_app
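
# A minimal sketch (not part of this module) of how a middleware filter might
# build on WSGIContext to observe the downstream status and headers. The
# _CaptureContext name and its __call__ method are hypothetical; only the
# WSGIContext.__init__ shown above is assumed.
class _CaptureContext(WSGIContext):
    def __call__(self, env, start_response):
        captured = []

        def _capturing_start_response(status, headers, exc_info=None):
            # Record what the wrapped app passed to start_response before
            # forwarding it to the real start_response.
            captured.append((status, headers))
            return start_response(status, headers, exc_info)

        body = self.app(env, _capturing_start_response)
        if captured:
            status, headers = captured[0]
            # The filter can inspect or act on status/headers here before
            # handing the body back to the WSGI server.
        return body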
    def GETorHEAD_base(self, req, server_type, partition, nodes, path,
                       attempts):
        """
        Base handler for HTTP GET or HEAD requests.

        :param req: swob.Request object
        :param server_type: server type
        :param partition: partition
        :param nodes: nodes
        :param path: path for the request
        :param attempts: number of attempts to try
        :returns: swob.Response object
        """
        statuses = []
        reasons = []
        bodies = []
        sources = []
        newest = config_true_value(req.headers.get('x-newest', 'f'))
        nodes = iter(nodes)
        while len(statuses) < attempts:
            try:
                node = nodes.next()
            except StopIteration:
                break
            if self.error_limited(node):
                continue
            start_node_timing = time.time()
            try:
                with ConnectionTimeout(self.app.conn_timeout):
                    headers = dict(req.headers)
                    headers['Connection'] = 'close'
                    conn = http_connect(
                        node['ip'], node['port'], node['device'], partition,
                        req.method, path, headers=headers,
                        query_string=req.query_string)
                self.app.set_node_timing(node,
                                         time.time() - start_node_timing)
                with Timeout(self.app.node_timeout):
                    possible_source = conn.getresponse()
                    # See NOTE: gate_conn at top of file about this.
                    possible_source.gate_conn = conn
            except (Exception, Timeout):
                self.exception_occurred(
                    node, server_type, _('Trying to %(method)s %(path)s') %
                    {'method': req.method, 'path': req.path})
                continue
            if self.is_good_source(possible_source):
                # 404 if we know we don't have a synced copy
                if not float(possible_source.getheader('X-PUT-Timestamp', 1)):
                    statuses.append(HTTP_NOT_FOUND)
                    reasons.append('')
                    bodies.append('')
                    self.close_gate_conn(possible_source)
                else:
                    statuses.append(possible_source.status)
                    reasons.append(possible_source.reason)
                    bodies.append('')
                    sources.append(possible_source)
                    if not newest:  # one good source is enough
                        break
            else:
                statuses.append(possible_source.status)
                reasons.append(possible_source.reason)
                bodies.append(possible_source.read())
                if possible_source.status == HTTP_INSUFFICIENT_STORAGE:
                    self.error_limit(node)
                elif is_server_error(possible_source.status):
                    self.error_occurred(node, _('ERROR %(status)d %(body)s '
                                                'From %(type)s Server') %
                                        {'status': possible_source.status,
                                         'body': bodies[-1][:1024],
                                         'type': server_type})
        if sources:
            sources.sort(key=source_key)
            source = sources.pop()
            for src in sources:
                self.close_gate_conn(src)
            res = Response(request=req, conditional_response=True)
            if req.method == 'GET' and \
                    source.status in (HTTP_OK, HTTP_PARTIAL_CONTENT):
                res.app_iter = self._make_app_iter(node, source)
                # See NOTE: gate_conn at top of file about this.
                res.gate_conn = source.gate_conn
            res.status = source.status
            update_headers(res, source.getheaders())
            if not res.environ:
                res.environ = {}
            res.environ['gate_x_timestamp'] = \
                source.getheader('x-timestamp')
            res.accept_ranges = 'bytes'
            res.content_length = source.getheader('Content-Length')
            if source.getheader('Content-Type'):
                res.charset = None
                res.content_type = source.getheader('Content-Type')
            return res
        return self.best_response(req, statuses, reasons, bodies,
                                  '%s %s' % (server_type, req.method))
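
# When the client sends X-Newest, GETorHEAD_base collects every good source,
# sorts them with source_key, and serves the last one. A minimal sketch of
# what such a key function could look like, assuming backend responses carry
# X-PUT-Timestamp / X-Timestamp headers (illustrative only, not the module's
# actual helper):
def _example_source_key(resp):
    # Sort ascending by timestamp so sources.pop() yields the newest copy.
    return float(resp.getheader('x-put-timestamp') or
                 resp.getheader('x-timestamp') or 0)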
    def __init__(self, conf):
        self.conf = conf
        self.logger = get_logger(conf, log_route='transport.memcached')
        # 'servers' is a semicolon-separated list of memcached host:port
        # endpoints; defaults to a single local instance.
        self.servers = conf.get('servers', 'localhost:11211').split(';')
        self.debug = config_true_value(conf.get('debug', 'False'))
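
# A minimal usage sketch for the memcached transport configuration above. The
# MemcachedTransport class name is hypothetical (the enclosing class is not
# shown in this fragment), but the conf keys match the __init__ above:
#
#     conf = {
#         'servers': '10.0.0.1:11211;10.0.0.2:11211',
#         'debug': 'no',
#     }
#     transport = MemcachedTransport(conf)
#     # transport.servers -> ['10.0.0.1:11211', '10.0.0.2:11211']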