def _munge_webdav_source_port(self, request, env):
    # Recognized WebDAV clients doing a plain GET get routed to
    # 'manage_DAVget' so they receive the document source rather
    # than the rendered result.
    agent = get_header(USER_AGENT, request.header)
    if not self._wdav_client_reg(agent):
        return
    env['WEBDAV_SOURCE_PORT'] = 1
    env['PATH_INFO'] = posixpath.normpath(
        posixpath.join(env['PATH_INFO'], 'manage_DAVget'))
def handle_request(self, request):
    """Medusa entry point: account for the hit, log it, and either
    collect the request body or continue with an empty stdin."""
    self.hits.increment()
    DebugLogger.log('B', id(request),
                    '%s %s' % (request.command.upper(), request.uri))
    size = get_header(CONTENT_LENGTH, request.header)
    if size and size != '0':
        # A request body is expected: hand off to the collector,
        # which calls continue_request once stdin is complete.
        zhttp_collector(self, request, int(size))
    else:
        # No body -- continue immediately with an empty stdin.
        self.continue_request(StringIO(), request)
def continue_request(self, sin, request):
    """Continue handling the request now that we have the stdin.

    Builds the WSGI environ and queues the request on the channel.
    """
    content_length = get_header(CONTENT_LENGTH, request.header)
    if content_length:
        content_length = int(content_length)
    else:
        content_length = 0
    DebugLogger.log('I', id(request), content_length)

    env = self.get_environment(request)

    if request.version == '1.0' and is_proxying_match(request.request):
        # A request made as if this Zope were an HTTP/1.0 proxy;
        # persistent connections use slightly different headers.
        connection_re = proxying_connection_re
    else:
        # A normal HTTP request.
        connection_re = CONNECTION
    env['http_connection'] = get_header(
        connection_re, request.header).lower()
    env['server_version'] = request.channel.server.SERVER_IDENT

    # Fill in the WSGI-mandated environ keys.
    pipe = ChannelPipe(request)
    env['wsgi.output'] = pipe
    env['wsgi.input'] = sin
    env['wsgi.errors'] = sys.stderr
    env['wsgi.version'] = (1, 0)
    env['wsgi.multithread'] = True
    env['wsgi.multiprocess'] = True
    env['wsgi.run_once'] = True
    env['wsgi.url_scheme'] = env['SERVER_PROTOCOL'].split('/')[0]

    request.channel.current_request = None
    request.channel.queue.append(
        ('Zope2WSGI', env, pipe.start_response))
    request.channel.work()
def make_response(request, headers):
    "Simple http response factory"
    # should this be integrated into the HTTPResponse constructor?
    response = ZServerHTTPResponse(stdout=ChannelPipe(request),
                                   stderr=StringIO())
    response._http_version = request.version
    # A request made as if this Zope were an HTTP/1.0 proxy needs
    # slightly different headers to manage persistent connections;
    # otherwise use the normal Connection header.
    is_proxied = (request.version == '1.0'
                  and is_proxying_match(request.request))
    connection_re = (proxying_connection_re if is_proxied
                     else http_server.CONNECTION)
    response._http_connection = http_server.get_header(
        connection_re, request.header).lower()
    response._server_version = request.channel.server.SERVER_IDENT
    return response
def continue_request(self, sin, request):
    """Continue handling the request now that we have the stdin.

    Builds the ZPublisher request/response pair and queues it on
    the channel for the worker threads.
    """
    content_length = get_header(CONTENT_LENGTH, request.header)
    if content_length:
        content_length = int(content_length)
    else:
        content_length = 0
    DebugLogger.log('I', id(request), content_length)

    environ = self.get_environment(request)
    zresponse = make_response(request, environ)
    if self._force_connection_close:
        zresponse._http_connection = 'close'
    zrequest = HTTPRequest(sin, environ, zresponse)

    request.channel.current_request = None
    request.channel.queue.append(
        (self.module_name, zrequest, zresponse))
    request.channel.work()
def make_response(request, headers):
    "Simple http response factory"
    # should this be integrated into the HTTPResponse constructor?
    out_pipe = ChannelPipe(request)
    response = ZServerHTTPResponse(stdout=out_pipe, stderr=StringIO())
    response._http_version = request.version
    if request.version == '1.0' and is_proxying_match(request.request):
        # The client addressed this Zope as an HTTP/1.0 proxy, which
        # means persistent connections are negotiated through a
        # different header (Proxy-Connection).
        connection_re = proxying_connection_re
    else:
        # A normal HTTP request.
        connection_re = http_server.CONNECTION
    raw_connection = http_server.get_header(connection_re, request.header)
    response._http_connection = raw_connection.lower()
    response._server_version = request.channel.server.SERVER_IDENT
    return response
def getresponse(self, body):
    # Push a complete response body (presumably an XML-RPC payload,
    # given the Content-Type -- TODO confirm against caller) onto the
    # medusa channel, then decide whether the connection can stay open.
    self.request['Content-Type'] = 'text/xml'
    self.request['Content-Length'] = len(body)
    self.request.push(body)

    connection = get_header(self.CONNECTION, self.request.header)

    # Persistent-connection bookkeeping: flags, not early returns,
    # because the producer pipeline below must run in every case.
    close_it = 0
    wrap_in_chunking = 0

    if self.request.version == '1.0':
        # HTTP/1.0: keep-alive only if explicitly requested AND we
        # can tell the client where the body ends (Content-Length).
        if connection == 'keep-alive':
            if not self.request.has_key('Content-Length'):
                close_it = 1
            else:
                self.request['Connection'] = 'Keep-Alive'
        else:
            close_it = 1
    elif self.request.version == '1.1':
        # HTTP/1.1: persistent by default; close only on request or
        # when we cannot delimit the body.
        if connection == 'close':
            close_it = 1
        elif not self.request.has_key('Content-Length'):
            if self.request.has_key('Transfer-Encoding'):
                if not self.request['Transfer-Encoding'] == 'chunked':
                    close_it = 1
            elif self.request.use_chunked:
                # No length known but chunking is allowed: delimit
                # the body with chunked transfer coding instead.
                self.request['Transfer-Encoding'] = 'chunked'
                wrap_in_chunking = 1
            else:
                close_it = 1
    elif self.request.version is None:
        # HTTP/0.9-style request with no version: always close.
        close_it = 1

    outgoing_header = producers.simple_producer(
        self.request.build_reply_header())

    if close_it:
        self.request['Connection'] = 'close'

    if wrap_in_chunking:
        outgoing_producer = producers.chunked_producer(
            producers.composite_producer(self.request.outgoing)
            )
        # prepend the header
        outgoing_producer = producers.composite_producer(
            [outgoing_header, outgoing_producer]
            )
    else:
        # prepend the header
        self.request.outgoing.insert(0, outgoing_header)
        outgoing_producer = producers.composite_producer(
            self.request.outgoing)

    # apply a few final transformations to the output
    self.request.channel.push_with_producer(
        # globbing gives us large packets
        producers.globbing_producer(
            # hooking lets us log the number of bytes sent
            producers.hooked_producer(
                outgoing_producer,
                self.request.log
                )
            )
        )

    self.request.channel.current_request = None

    if close_it:
        self.request.channel.close_when_done()
def sendresponse(self, response):
    # Send a response described by a dict ({'headers': ..., 'body': ...})
    # over the medusa channel, then apply the same persistent-connection
    # logic as getresponse above.
    headers = response.get('headers', {})
    for header in headers:
        self.request[header] = headers[header]

    if not self.request.has_key('Content-Type'):
        self.request['Content-Type'] = 'text/plain'

    if headers.get('Location'):
        # Redirect: emit a 301 with an empty body and stop here.
        self.request['Content-Length'] = 0
        self.request.error(301)
        return

    body = response.get('body', '')
    self.request['Content-Length'] = len(body)
    self.request.push(body)

    connection = get_header(self.CONNECTION, self.request.header)

    close_it = 0
    wrap_in_chunking = 0

    if self.request.version == '1.0':
        # HTTP/1.0: keep-alive only when requested and the body
        # length is known.
        if connection == 'keep-alive':
            if not self.request.has_key('Content-Length'):
                close_it = 1
            else:
                self.request['Connection'] = 'Keep-Alive'
        else:
            close_it = 1
    elif self.request.version == '1.1':
        # HTTP/1.1: persistent by default; close on request, or fall
        # back to chunked transfer coding when no length is known.
        if connection == 'close':
            close_it = 1
        elif not self.request.has_key('Content-Length'):
            if self.request.has_key('Transfer-Encoding'):
                if not self.request['Transfer-Encoding'] == 'chunked':
                    close_it = 1
            elif self.request.use_chunked:
                self.request['Transfer-Encoding'] = 'chunked'
                wrap_in_chunking = 1
            else:
                close_it = 1
    elif self.request.version is None:
        # No version (HTTP/0.9-style): always close.
        close_it = 1

    outgoing_header = producers.simple_producer(
        self.request.build_reply_header())

    if close_it:
        self.request['Connection'] = 'close'

    if wrap_in_chunking:
        outgoing_producer = producers.chunked_producer(
            producers.composite_producer(self.request.outgoing))
        # prepend the header
        outgoing_producer = producers.composite_producer(
            [outgoing_header, outgoing_producer])
    else:
        # prepend the header
        self.request.outgoing.insert(0, outgoing_header)
        outgoing_producer = producers.composite_producer(
            self.request.outgoing)

    # apply a few final transformations to the output
    self.request.channel.push_with_producer(
        # globbing gives us large packets
        producers.globbing_producer(
            # hooking lets us log the number of bytes sent
            producers.hooked_producer(outgoing_producer,
                                      self.request.log)))

    self.request.channel.current_request = None

    if close_it:
        self.request.channel.close_when_done()
def get_environment(self, request,
                    # These are strictly performance hackery...
                    # (bound at def-time so lookups are locals)
                    h2ehas=header2env.has_key,
                    h2eget=header2env.get,
                    workdir=os.getcwd(),
                    ospath=os.path,
                    ):
    # Build a CGI-style environment dict from the medusa request.
    (path, params, query, fragment) = request.split_uri()

    if params:
        path = path + params  # undo medusa bug!

    # Strip all leading slashes so SCRIPT_NAME/PATH_INFO splitting
    # below works on a normalized path.
    while path and path[0] == '/':
        path = path[1:]
    if '%' in path:
        path = unquote(path)
    if query:
        # ZPublisher doesn't want the leading '?'
        query = query[1:]

    server = request.channel.server
    env = {}
    env['REQUEST_METHOD'] = request.command.upper()
    env['SERVER_PORT'] = str(server.port)
    env['SERVER_NAME'] = server.server_name
    env['SERVER_SOFTWARE'] = server.SERVER_IDENT
    env['SERVER_PROTOCOL'] = "HTTP/" + request.version
    env['channel.creation_time'] = request.channel.creation_time
    if self.uri_base == '/':
        env['SCRIPT_NAME'] = ''
        env['PATH_INFO'] = '/' + path
    else:
        env['SCRIPT_NAME'] = self.uri_base
        try:
            # Everything after the uri_base prefix is PATH_INFO.
            path_info = path.split(self.uri_base[1:], 1)[1]
        except:
            path_info = ''
        env['PATH_INFO'] = path_info
    env['PATH_TRANSLATED'] = ospath.normpath(ospath.join(
        workdir, env['PATH_INFO']))
    if query:
        env['QUERY_STRING'] = query
    env['GATEWAY_INTERFACE'] = 'CGI/1.1'
    env['REMOTE_ADDR'] = request.channel.addr[0]

    # This is a really bad hack to support WebDAV
    # clients accessing documents through GET
    # on the HTTP port. We check if your WebDAV magic
    # machinery is enabled and if the client is recognized
    # as WebDAV client. If yes, we fake the environment
    # to pretend the ZPublisher to have a WebDAV request.
    # This sucks like hell but it works pretty fine ;-)
    if env['REQUEST_METHOD'] == 'GET':
        wdav_client_reg = getattr(sys, 'WEBDAV_SOURCE_PORT_CLIENTS', None)
        if wdav_client_reg:
            agent = get_header(USER_AGENT, request.header)
            if wdav_client_reg(agent):
                env['WEBDAV_SOURCE_PORT'] = 1
                path_info = env['PATH_INFO']
                path_info = os.path.join(path_info, 'manage_FTPget')
                path_info = os.path.normpath(path_info)
                if os.sep != '/':
                    # os.path.join used the platform separator;
                    # URLs always want forward slashes.
                    path_info = path_info.replace(os.sep, '/')
                env['PATH_INFO'] = path_info

    # If we're using a resolving logger, try to get the
    # remote host from the resolver's cache.
    if hasattr(server.logger, 'resolver'):
        dns_cache = server.logger.resolver.cache
        if dns_cache.has_key(env['REMOTE_ADDR']):
            remote_host = dns_cache[env['REMOTE_ADDR']][2]
            if remote_host is not None:
                env['REMOTE_HOST'] = remote_host

    # Copy the remaining request headers into the environment:
    # known headers via the header2env map, everything else as
    # HTTP_* (first value wins for repeated headers).
    env_has = env.has_key
    for header in request.header:
        key, value = header.split(":", 1)
        key = key.lower()
        value = value.strip()
        if h2ehas(key) and value:
            env[h2eget(key)] = value
        else:
            key = 'HTTP_%s' % ("_".join(key.split("-"))).upper()
            if value and not env_has(key):
                env[key] = value

    env.update(self.env_override)
    return env
def done(self, *arg, **kw):

    """ I didn't want to override this, but there's no way around
    it in order to support deferreds - CM

    finalize this transaction - send output to the http channel"""

    # ----------------------------------------
    # persistent connection management
    # ----------------------------------------

    #  --- BUCKLE UP! ----

    connection = http_server.get_header(http_server.CONNECTION,
                                        self.header)
    connection = connection.lower()

    # Flags controlling how the response is framed and whether the
    # channel is closed afterwards.
    close_it = 0
    wrap_in_chunking = 0
    globbing = 1

    if self.version == '1.0':
        # HTTP/1.0: keep-alive only if requested and the body length
        # is known via Content-Length.
        if connection == 'keep-alive':
            if not 'Content-Length' in self:
                close_it = 1
            else:
                self['Connection'] = 'Keep-Alive'
        else:
            close_it = 1

    elif self.version == '1.1':
        # HTTP/1.1: persistent by default; close on request, or use
        # chunked transfer coding when no length is known.
        if connection == 'close':
            close_it = 1
        elif not 'Content-Length' in self:
            if 'Transfer-Encoding' in self:
                if not self['Transfer-Encoding'] == 'chunked':
                    close_it = 1
            elif self.use_chunked:
                self['Transfer-Encoding'] = 'chunked'
                wrap_in_chunking = 1
                # globbing slows down tail -f output, so only use it if
                # we're not in chunked mode
                globbing = 0
            else:
                close_it = 1

    elif self.version is None:
        # Although we don't *really* support http/0.9 (because
        # we'd have to use \r\n as a terminator, and it would just
        # yuck up a lot of stuff) it's very common for developers
        # to not want to type a version number when using telnet
        # to debug a server.
        close_it = 1

    outgoing_header = producers.simple_producer(self.build_reply_header())

    if close_it:
        self['Connection'] = 'close'

    if wrap_in_chunking:
        outgoing_producer = deferring_chunked_producer(
            deferring_composite_producer(self.outgoing)
            )
        # prepend the header
        outgoing_producer = deferring_composite_producer(
            [outgoing_header, outgoing_producer]
            )
    else:
        # prepend the header
        self.outgoing.insert(0, outgoing_header)
        outgoing_producer = deferring_composite_producer(self.outgoing)

    # hook logging into the output
    outgoing_producer = deferring_hooked_producer(outgoing_producer,
                                                  self.log)

    if globbing:
        outgoing_producer = deferring_globbing_producer(outgoing_producer)

    self.channel.push_with_producer(outgoing_producer)

    self.channel.current_request = None

    if close_it:
        self.channel.close_when_done()