def test_connection_refused(self):
    # A refused connection must fire only the close callback, never the
    # connect callback. (The kqueue IOLoop used to behave differently
    # from the epoll IOLoop in this respect.)
    server_socket, port = bind_unused_port()
    server_socket.close()
    stream = IOStream(socket.socket(), self.io_loop)
    self.connect_called = False

    def on_connect():
        self.connect_called = True

    stream.set_close_callback(self.stop)
    # Log messages vary by platform and ioloop implementation.
    with ExpectLog(gen_log, ".*", required=False):
        stream.connect(("localhost", port), on_connect)
        self.wait()
    self.assertFalse(self.connect_called)
    self.assertTrue(isinstance(stream.error, socket.error), stream.error)
    if sys.platform == 'cygwin':
        # cygwin's errnos don't match those used on native windows python.
        return
    refused = [errno.ECONNREFUSED]
    if hasattr(errno, "WSAECONNREFUSED"):
        refused.append(errno.WSAECONNREFUSED)
    self.assertIn(stream.error.args[0], refused)
def connect(self):
    """Establish the MySQL connection on the current IOLoop.

    Runs inside a child greenlet: the blocking protocol handshake is
    suspended via main.switch() until the async connect resolves.
    Raises err.OperationalError(2003, ...) on any failure.
    """
    self._loop = IOLoop.current()
    try:
        # Prefer the UNIX socket only for local hosts.
        if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.host_info = "Localhost via UNIX socket"
            address = self.unix_socket
        else:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
            self.host_info = "socket %s:%d" % (self.host, self.port)
            address = (self.host, self.port)
            # TCP-only socket options: keepalive always, Nagle off on request.
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            if self.no_delay:
                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        sock = IOStream(sock)
        sock.set_close_callback(self.stream_close_callback)

        # This method must run in a greenlet spawned off the main one so
        # we can switch back while the connect is in flight.
        child_gr = greenlet.getcurrent()
        main = child_gr.parent
        assert main is not None, "Execut must be running in child greenlet"

        if self.connect_timeout:
            def timeout():
                # self.socket is only set on success; still unset means
                # the connect is overdue, so force-close with an error.
                if not self.socket:
                    sock.close((None, IOError("connect timeout"), None))
            self._loop.call_later(self.connect_timeout, timeout)

        def connected(future):
            # Resume the child greenlet with either the failure or the
            # now-connected stream.
            if future._exc_info is not None:
                child_gr.throw(future.exception())
            else:
                self.socket = sock
                child_gr.switch()

        future = sock.connect(address)
        self._loop.add_future(future, connected)
        # Yield to the main greenlet until connected() switches back.
        main.switch()

        self._rfile = self.socket
        self._get_server_information()
        self._request_authentication()

        # Optional post-connect session setup.
        if self.sql_mode is not None:
            c = self.cursor()
            c.execute("SET sql_mode=%s", (self.sql_mode,))
        if self.init_command is not None:
            c = self.cursor()
            c.execute(self.init_command)
            self.commit()
        if self.autocommit_mode is not None:
            self.autocommit(self.autocommit_mode)
    except Exception as e:
        # Tear down any half-open stream before surfacing the standard
        # MySQL "can't connect" error (code 2003).
        if self.socket:
            self._rfile = None
            self.socket.close()
            self.socket = None
        raise err.OperationalError(
            2003,
            "Can't connect to MySQL server on %s (%r)" % (
                self.unix_socket or ("%s:%s" % (self.host, self.port)), e))
class ForwardConnection(object):
    """Relays a client's request to a remote backend and streams the
    backend's full response back before closing the client stream."""

    def __init__(self, remote_address, stream, address, headers):
        self.remote_address = remote_address
        self.stream = stream
        self.address = address
        self.headers = headers
        self.remote_stream = IOStream(socket.socket())
        self.remote_stream.connect(self.remote_address, self._on_remote_connected)
        self.remote_stream.set_close_callback(self._on_close)

    def _on_remote_connected(self):
        logging.info('forward %r to %r', self.address, self.remote_address)
        self.remote_stream.write(self.headers, self._on_remote_write_complete)

    def _on_remote_write_complete(self):
        logging.info('send request to %s', self.remote_address)
        self.remote_stream.read_until_close(self._on_remote_read_close)

    def _on_remote_read_close(self, data):
        # Flush the backend's reply to the client, then hang up on it.
        self.stream.write(data, self.stream.close)

    def _on_close(self):
        logging.info('remote quit %s', self.remote_address)
        self.remote_stream.close()
def test_gaierror(self):
    # Test that IOStream sets its exc_info on getaddrinfo error.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    stream = IOStream(s, io_loop=self.io_loop)
    stream.set_close_callback(self.stop)
    # BUG FIX: use a malformed domain name instead of a name that's
    # simply unlikely to exist — opendns and some ISPs return bogus
    # addresses for nonexistent domains instead of the proper error
    # codes, which made this test flaky.
    stream.connect(('an invalid domain', 54321))
    self.assertTrue(isinstance(stream.error, socket.gaierror), stream.error)
def test_connection_refused(self):
    # A refused connection runs only the close callback, never the
    # connect callback. (The kqueue IOLoop used to behave differently
    # from the epoll IOLoop in this respect.)
    cleanup_func, port = refusing_port()
    self.addCleanup(cleanup_func)
    stream = IOStream(socket.socket(), self.io_loop)
    self.connect_called = False

    def on_connect():
        self.connect_called = True
        self.stop()

    stream.set_close_callback(self.stop)
    # Log messages vary by platform and ioloop implementation.
    with ExpectLog(gen_log, ".*", required=False):
        stream.connect(("127.0.0.1", port), on_connect)
        self.wait()
    self.assertFalse(self.connect_called)
    self.assertTrue(isinstance(stream.error, socket.error), stream.error)
    if sys.platform == "cygwin":
        # cygwin's errnos don't match those used on native windows python.
        return
    refused = [errno.ECONNREFUSED]
    if hasattr(errno, "WSAECONNREFUSED"):
        refused.append(errno.WSAECONNREFUSED)
    self.assertIn(stream.error.args[0], refused)
class Connection(object):
    """A single TCP connection to the server, backed by a Tornado IOStream."""

    def __init__(self, host, port, event_handler, stop_after=None, io_loop=None):
        self.host = host
        self.port = port
        # Weak proxy so this connection does not keep the handler alive.
        self._event_handler = weakref.proxy(event_handler)
        self.timeout = stop_after
        self._stream = None
        self._io_loop = io_loop
        self.try_left = 2
        self.in_progress = False
        self.read_queue = []
        self.read_callbacks = []

    def __del__(self):
        self.disconnect()

    def connect(self):
        """Open the socket, wrap it in an IOStream and fire 'on_connect'.

        Raises ConnectionError if the TCP connection fails.
        """
        if not self._stream:
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
                sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
                sock.settimeout(self.timeout)
                sock.connect((self.host, self.port))
                self._stream = IOStream(sock, io_loop=self._io_loop)
                self._stream.set_close_callback(self.on_stream_close)
                self.connected()
            except socket.error as e:
                # BUG FIX: "except socket.error, e" is Python-2-only
                # syntax; "as" works on Python 2.6+ and 3.x.
                raise ConnectionError(str(e))
        self.fire_event('on_connect')
def _maybe_connect(self, to_pid, callback=None):
    """Asynchronously establish a connection to the remote pid."""
    # Never call a None callback; wrap() preserves the caller's stack context.
    callback = stack_context.wrap(callback or (lambda stream: None))

    def streaming_callback(data):
        # we are not guaranteed to get an acknowledgment, but log and discard bytes if we do.
        log.info('Received %d bytes from %s, discarding.' % (len(data), to_pid))
        log.debug(' data: %r' % (data, ))

    def on_connect(exit_cb, stream):
        # Runs once the TCP connect completes: publish the stream, notify
        # all queued waiters, then drain inbound bytes until close.
        log.info('Connection to %s established' % to_pid)
        with self._connection_callbacks_lock:
            self._connections[to_pid] = stream
        self.__dispatch_on_connect_callbacks(to_pid, stream)
        self.__loop.add_callback(
            stream.read_until_close, exit_cb,
            streaming_callback=streaming_callback)

    create = False
    with self._connection_callbacks_lock:
        # Under the lock, decide whether we are the first waiter and thus
        # responsible for actually creating the connection.
        stream = self._connections.get(to_pid)
        callbacks = self._connection_callbacks.get(to_pid)
        if not stream:
            self._connection_callbacks[to_pid].append(callback)
            if not callbacks:
                create = True
    if stream:
        # Already connected: run the callback outside the lock.
        self.__loop.add_callback(callback, stream)
        return
    if not create:
        # Another caller is mid-connect; our callback is queued for it.
        return
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    if not sock:
        raise self.SocketError('Failed opening socket')
    stream = IOStream(sock, io_loop=self.__loop)
    stream.set_nodelay(True)
    stream.set_close_callback(
        partial(self.__on_exit, to_pid, b'reached end of stream'))
    connect_callback = partial(on_connect, partial(self.__on_exit, to_pid), stream)
    log.info('Establishing connection to %s' % to_pid)
    stream.connect((to_pid.ip, to_pid.port), callback=connect_callback)
    if stream.closed():
        raise self.SocketError('Failed to initiate stream connection')
    # The connect itself is asynchronous — hence only "maybe" connected.
    log.info('Maybe connected to %s' % to_pid)
class Connection(object):
    """A Redis client connection built on tornado.iostream.IOStream."""

    def __init__(self, host="localhost", port=6379, timeout=None, io_loop=None):
        self.host = host
        self.port = port
        self._io_loop = io_loop
        self._stream = None
        self.in_porcess = False  # (sic) attribute name kept for compatibility
        self.timeout = timeout
        self._lock = 0
        self.info = {"db": 0, "pass": None}

    def __del__(self):
        self.disconnect()

    def connect(self):
        """Connect to the Redis server and wrap the socket in an IOStream.

        Raises ConnectionError if the TCP connection fails.
        """
        if not self._stream:
            try:
                sock = socket.create_connection((self.host, self.port),
                                                timeout=self.timeout)
                # BUG FIX: was sock.setsocketopt(socket.COL_TCP, ...) —
                # both names are misspelled and raise AttributeError.
                sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
                self._stream = IOStream(sock, io_loop=self._io_loop)
                self._stream.set_close_callback(self.on_stream_close)
                self.info["db"] = 0
                self.info["pass"] = None
            except socket.error as e:
                # BUG FIX: e.message does not exist on Python 3; use str(e).
                raise ConnectionError(str(e))

    def on_stream_close(self):
        """Stream close callback.

        BUG FIX: the method was misspelled on_stram_close, so the
        set_close_callback(self.on_stream_close) call in connect()
        raised AttributeError.
        """
        if self._stream:
            self.disconnect()

    def disconnect(self):
        """Tear down the stream, shutting the socket down first (best effort)."""
        if self._stream:
            s = self._stream
            self._stream = None
            try:
                if s.socket:
                    s.socket.shutdown(socket.SHUT_RDWR)
                s.close()
            except Exception:
                # Deliberate best-effort teardown; the peer may be gone.
                pass

    @gen.coroutine
    def write(self, data):
        """Write raw data to the server, encoding str on Python 3.

        Raises ConnectionError when the stream is absent or the write fails.
        """
        if not self._stream:
            # BUG FIX: was the typo "if nnot self._stream" (syntax error).
            self.disconnect()
            raise ConnectionError("Try to write to non-exist Connection")
        try:
            if sys.version > "3":
                data = bytes(data, encoding="utf-8")
            yield self._stream.write(data)
        except IOError as e:
            raise ConnectionError(str(e))
class Connection(object):
    """A TCP connection that queues 'ready' callbacks and runs them one at
    a time as context-manager exits release the connection."""

    def __init__(self, host, port, event_handler, stop_after=None, io_loop=None):
        self.host = host
        self.port = port
        # Weak proxy so this connection does not keep the handler alive.
        self._event_handler = weakref.proxy(event_handler)
        self.timeout = stop_after
        self._stream = None
        self._io_loop = io_loop
        self.try_left = 2
        self.in_progress = False
        self.read_callbacks = []
        self.ready_callbacks = deque()

    def __del__(self):
        self.disconnect()

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        if self.ready_callbacks:
            # Pop a SINGLE callback from the queue and execute it.
            # The next one will be executed from the code
            # invoked by the callback
            callback = self.ready_callbacks.popleft()
            callback()

    def ready(self):
        """True when no reads are pending and no callers are queued."""
        return not self.read_callbacks and not self.ready_callbacks

    def wait_until_ready(self, callback=None):
        """Run `callback` now if ready, otherwise queue it; returns self."""
        if callback:
            if not self.ready():
                self.ready_callbacks.append(callback)
            else:
                callback()
        return self

    def connect(self):
        """Open the socket, wrap it in an IOStream and fire 'on_connect'.

        Raises ConnectionError if the TCP connection fails.
        """
        if not self._stream:
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
                sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
                sock.settimeout(self.timeout)
                sock.connect((self.host, self.port))
                self._stream = IOStream(sock, io_loop=self._io_loop)
                self._stream.set_close_callback(self.on_stream_close)
                self.connected()
            except socket.error as e:
                # BUG FIX: "except socket.error, e" is Python-2-only
                # syntax; "as" works on Python 2.6+ and 3.x.
                raise ConnectionError(str(e))
        self.fire_event('on_connect')
def _handle_accept(self, fd, events):
    """Accept a pending client and start reading its first CRLF line."""
    connection, address = self._socket.accept()
    host = "%s:%d" % address
    stream = IOStream(connection)
    self._streams[host] = stream
    # Partial application binds the host key for both handlers.
    stream.set_close_callback(functools.partial(self._handle_close, host))
    stream.read_until("\r\n", functools.partial(self._handle_read, host))
def _maybe_connect(self, to_pid, callback=None):
    """Asynchronously establish a connection to the remote pid."""
    # Never call a None callback; wrap() preserves the caller's stack context.
    callback = stack_context.wrap(callback or (lambda stream: None))

    def streaming_callback(data):
        # we are not guaranteed to get an acknowledgment, but log and discard bytes if we do.
        log.info('Received %d bytes from %s, discarding.' % (len(data), to_pid))
        log.debug(' data: %r' % (data,))

    def on_connect(exit_cb, stream):
        # Runs once the TCP connect completes: publish the stream, notify
        # all queued waiters, then drain inbound bytes until close.
        log.info('Connection to %s established' % to_pid)
        with self._connection_callbacks_lock:
            self._connections[to_pid] = stream
        self.__dispatch_on_connect_callbacks(to_pid, stream)
        self.__loop.add_callback(
            stream.read_until_close,
            exit_cb,
            streaming_callback=streaming_callback)

    create = False
    with self._connection_callbacks_lock:
        # Under the lock, decide whether we are the first waiter and thus
        # responsible for actually creating the connection.
        stream = self._connections.get(to_pid)
        callbacks = self._connection_callbacks.get(to_pid)
        if not stream:
            self._connection_callbacks[to_pid].append(callback)
            if not callbacks:
                create = True
    if stream:
        # Already connected: run the callback outside the lock.
        self.__loop.add_callback(callback, stream)
        return
    if not create:
        # Another caller is mid-connect; our callback is queued for it.
        return
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    if not sock:
        raise self.SocketError('Failed opening socket')
    stream = IOStream(sock, io_loop=self.__loop)
    stream.set_nodelay(True)
    stream.set_close_callback(partial(self.__on_exit, to_pid, b'reached end of stream'))
    connect_callback = partial(on_connect, partial(self.__on_exit, to_pid), stream)
    log.info('Establishing connection to %s' % to_pid)
    stream.connect((to_pid.ip, to_pid.port), callback=connect_callback)
    if stream.closed():
        raise self.SocketError('Failed to initiate stream connection')
    # The connect itself is asynchronous — hence only "maybe" connected.
    log.info('Maybe connected to %s' % to_pid)
class Connection(object):
    """A Redis connection that defers queued commands until all pending
    read operations complete."""

    def __init__(self, host='localhost', port=6379, weak_event_handler=None,
                 stop_after=None, io_loop=None):
        self.host = host
        self.port = port
        if weak_event_handler:
            self._event_handler = weak_event_handler
        else:
            self._event_handler = None
        self.timeout = stop_after
        self._stream = None
        self._io_loop = io_loop
        self.in_progress = False
        self.read_callbacks = []
        self.ready_callbacks = deque()
        self._lock = 0
        self.info = {'db': 0}

    def __del__(self):
        self.disconnect()

    def execute_pending_command(self):
        # Continue with the pending command execution
        # if all read operations are completed.
        if not self.read_callbacks and self.ready_callbacks:
            # Pop a SINGLE callback from the queue and execute it.
            # The next one will be executed from the code
            # invoked by the callback
            callback = self.ready_callbacks.popleft()
            callback()

    def ready(self):
        """True when no reads are pending and no callers are queued."""
        return (not self.read_callbacks and not self.ready_callbacks)

    def wait_until_ready(self, callback=None):
        """Run `callback` now if ready, otherwise queue it."""
        if callback:
            if not self.ready():
                self.ready_callbacks.append(callback)
            else:
                callback()

    def connect(self):
        """Open the socket, wrap it in an IOStream and fire 'on_connect'.

        Raises ConnectionError if the TCP connection fails.
        """
        if not self._stream:
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
                sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
                sock.settimeout(self.timeout)
                sock.connect((self.host, self.port))
                self._stream = IOStream(sock, io_loop=self._io_loop)
                self._stream.set_close_callback(self.on_stream_close)
                self.info['db'] = 0
            except socket.error as e:
                # BUG FIX: "except socket.error, e" is Python-2-only
                # syntax; "as" works on Python 2.6+ and 3.x.
                raise ConnectionError(str(e))
        self.fire_event('on_connect')
def test_gaierror(self):
    # IOStream should surface a getaddrinfo failure via stream.error.
    # A malformed name is deliberately used here: names that are merely
    # unlikely to exist get resolved to bogus addresses by opendns and
    # some ISPs instead of producing the proper error codes.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    stream = IOStream(sock, io_loop=self.io_loop)
    stream.set_close_callback(self.stop)
    stream.connect(('an invalid domain', 54321))
    self.assertTrue(isinstance(stream.error, socket.gaierror), stream.error)
def get_stream(self):
    """Return an IOStream connecting (asynchronously) to the local VNC server."""
    # BUG FIX: removed the unused "import commands" — the module was never
    # referenced and no longer exists on Python 3.
    import socket
    from tornado.iostream import IOStream
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    stream = IOStream(sock)
    stream.set_close_callback(self.on_close)
    stream.connect(('127.0.0.1', self.get_vnc_port()), self.connected_to_vnc)
    return stream
class Flash(object):
    """Client that connects to a command server and executes commands
    streamed as (type, length[, payload]) frames."""

    def __init__(self, close_callback=None):
        self._iostream = None
        self._close_callback = close_callback

    def connect(self, host='127.0.0.1', port=9999):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._iostream = IOStream(sock)
        self._iostream.set_close_callback(self._on_connection_close)
        # Connect, then start listening for commands.
        self._iostream.connect((host, port), self._read_head)

    def close(self):
        self._on_connection_close()

    def _on_connection_close(self):
        self._iostream.close()
        if self._close_callback:
            self._close_callback()

    def _read_head(self):
        # Every frame starts with a fixed-size header: type and payload length.
        self._iostream.read_bytes(BaseCommand.meta_size, self._on_read_head)

    def _on_read_head(self, data):
        ctype, length = struct.unpack(">BH", data)
        if length:
            self._iostream.read_bytes(length, partial(self.execute_command, ctype))
        else:
            self.execute_command(ctype)

    def execute_command(self, ctype, value=None):
        """Dispatch one frame to its registered command, then read the next."""
        command = CommandsRegistry.get_by_type(ctype)
        if command is not None:
            command.execute(value)
        # Unknown command types are silently skipped.
        self._read_head()

    @classmethod
    def start(cls, host, port):
        """Run a client until interrupted (Ctrl-C stops the IOLoop)."""
        flash = cls(close_callback=IOLoop.instance().stop)
        flash.connect(host, port)
        # BUG FIX: signal handlers receive (signum, frame); passing
        # flash.close directly raised TypeError on SIGINT because
        # close() takes no extra arguments.
        signal.signal(signal.SIGINT, lambda signum, frame: flash.close())
        IOLoop.instance().start()
        IOLoop.instance().close()
def test_connection_refused(self):
    # The connect callback must not run when the connection is refused;
    # only the close callback fires. (The kqueue IOLoop used to behave
    # differently from the epoll IOLoop in this respect.)
    port = get_unused_port()
    stream = IOStream(socket.socket(), self.io_loop)
    self.connect_called = False

    def on_connect():
        self.connect_called = True

    stream.set_close_callback(self.stop)
    stream.connect(("localhost", port), on_connect)
    self.wait()
    self.assertFalse(self.connect_called)
def connect():
    """Open an IOStream to the rtsp camera and arm the client timeout."""
    logging.debug('testing rtsp netcam at %s' % url)
    raw_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    raw_sock.settimeout(settings.MJPG_CLIENT_TIMEOUT)
    stream = IOStream(raw_sock)
    stream.set_close_callback(on_close)
    stream.connect((data['host'], int(data['port'])), on_connect)
    # If the connect does not complete in time, fire on_connect with
    # _timeout=True so the caller can report the failure.
    timeout[0] = io_loop.add_timeout(
        datetime.timedelta(seconds=settings.MJPG_CLIENT_TIMEOUT),
        functools.partial(on_connect, _timeout=True))
    return stream
class _HTTPConnection(simple_httpclient._HTTPConnection):
    """simple_httpclient connection that additionally supports binding the
    outgoing socket to a specific local source address."""

    def __init__(self, *args, **kwargs):
        # Pop our extra kwarg before delegating so the base class does not
        # see an unknown argument.
        self.source_address = kwargs.pop('source_address', None)
        super(_HTTPConnection, self).__init__(*args, **kwargs)

    def _on_resolve(self, addrinfo):
        # Called with resolver results; use the first (af, sockaddr) pair.
        af, sockaddr = addrinfo[0]
        if self.parsed.scheme == "https":
            # Build ssl_options from the request's TLS settings.
            ssl_options = {}
            if self.request.validate_cert:
                ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
            if self.request.ca_certs is not None:
                ssl_options["ca_certs"] = self.request.ca_certs
            else:
                ssl_options["ca_certs"] = simple_httpclient._DEFAULT_CA_CERTS
            if self.request.client_key is not None:
                ssl_options["keyfile"] = self.request.client_key
            if self.request.client_cert is not None:
                ssl_options["certfile"] = self.request.client_cert
            if sys.version_info >= (2, 7):
                # Disable SSLv2 via the cipher list when the interpreter
                # exposes the "ciphers" option.
                ssl_options["ciphers"] = "DEFAULT:!SSLv2"
            else:
                # This is really only necessary for pre-1.0 versions
                # of openssl, but python 2.6 doesn't expose version
                # information.
                ssl_options["ssl_version"] = ssl.PROTOCOL_SSLv3
            self.stream = SSLIOStream(socket.socket(af),
                                      io_loop=self.io_loop,
                                      ssl_options=ssl_options,
                                      max_buffer_size=self.max_buffer_size)
        else:
            self.stream = IOStream(socket.socket(af),
                                   io_loop=self.io_loop,
                                   max_buffer_size=self.max_buffer_size)
        # Bind the local end before connecting, if requested.
        if self.source_address:
            self.stream.socket.bind(self.source_address)
        # The effective deadline is the tighter of the two timeouts.
        timeout = min(self.request.connect_timeout, self.request.request_timeout)
        if timeout:
            self._timeout = self.io_loop.add_timeout(
                self.start_time + timeout,
                stack_context.wrap(self._on_timeout))
        self.stream.set_close_callback(self._on_close)
        # ipv6 addresses are broken (in self.parsed.hostname) until
        # 2.7, here is correctly parsed value calculated in __init__
        self.stream.connect(sockaddr, self._on_connect,
                            server_hostname=self.parsed_hostname)
class RemoteUpstream(Upstream):
    """Upstream whose peer is a remote host.

    Most methods mirror LocalUpstream, but the two may diverge later.
    """

    def initialize(self):
        self.socket = socket.socket(self._address_type, socket.SOCK_STREAM)
        self.stream = IOStream(self.socket)
        self.stream.set_close_callback(self.on_close)

    def do_connect(self):
        self.stream.connect(self.dest, self.on_connect)

    @property
    def address(self):
        return self.socket.getsockname()

    @property
    def address_type(self):
        return self._address_type

    def on_connect(self):
        self.connection_callback(self)
        finish_handler = functools.partial(self.on_streaming_data, finished=True)
        self.stream.read_until_close(finish_handler, self.on_streaming_data)

    def on_close(self):
        # Distinguish an errored close from an orderly one.
        if self.stream.error:
            self.error_callback(self, self.stream.error)
        else:
            self.close_callback(self)

    def on_streaming_data(self, data, finished=False):
        if data:
            self.streaming_callback(self, data)

    def do_write(self, data):
        try:
            self.stream.write(data)
        except IOError:
            self.close()

    def do_close(self):
        if self.socket:
            logger.info("close upstream: %s:%s" % self.address)
            self.stream.close()
def test_gaierror(self):
    # Verify a getaddrinfo failure is recorded in stream.error. Reliably
    # triggering a real gaierror is hard (some resolvers won't even
    # return errors for malformed names), so socket.connect is mocked.
    # If IOStream ever calls a Resolver before sock.connect, the mock
    # target must move accordingly.
    stream = IOStream(socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0))
    stream.set_close_callback(self.stop)
    fake_error = socket.gaierror(errno.EIO, 'boom')
    with mock.patch('socket.socket.connect', side_effect=fake_error):
        with self.assertRaises(StreamClosedError):
            yield stream.connect(('localhost', 80))
        self.assertTrue(isinstance(stream.error, socket.gaierror))
def test_gaierror(self):
    # Verify a getaddrinfo failure is recorded in stream.error. Reliably
    # triggering a real gaierror is hard (some resolvers won't even
    # return errors for malformed names), so socket.connect is mocked.
    # If IOStream ever calls a Resolver before sock.connect, the mock
    # target must move accordingly.
    stream = IOStream(socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0))
    stream.set_close_callback(self.stop)
    fake_error = socket.gaierror(errno.EIO, "boom")
    with mock.patch("socket.socket.connect", side_effect=fake_error):
        with self.assertRaises(StreamClosedError):
            yield stream.connect(("10.0.0.7", 80))
        self.assertTrue(isinstance(stream.error, socket.gaierror))
class BeanstalktClient(beanstalkt.Client):
    """beanstalkt.Client that never auto-reconnects when the stream closes;
    reconnection is left entirely to the caller."""

    def _reconnect(self):
        # Intentionally a no-op (installed as the stream's close callback).
        pass

    @gen.coroutine
    def connect(self):
        """Connect to beanstalkd server."""
        if not self.closed():
            # Already connected; nothing to do.
            return
        self._talking = False
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                                     socket.IPPROTO_TCP)
        self._stream = IOStream(self._socket)
        self._stream.set_close_callback(self._reconnect)
        yield self._stream.connect((self.host, self.port))
def _connect_to_node(self, host, data=None):
    """Open a TCP stream to `host` ("ip:port"), register read/close
    handlers, and optionally send `data` right away.

    Connection errors are deliberately swallowed (best-effort connect).
    """
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        address = host.split(':')
        sock.connect((address[0], int(address[1])))
        stream = IOStream(sock, io_loop=ioloop.IOLoop.instance())
        self._streams[host] = stream
        stream.set_close_callback(functools.partial(self._handle_close, host))
        self._streams[host].read_until(
            "\r\n", functools.partial(self._handle_read, host))
        if data:
            stream.write(data)
    except socket.error:
        # BUG FIX: the old "except socket.error, e" clause used
        # Python-2-only syntax and ended in a meaningless "a = 5".
        # The best-effort swallow is kept deliberately.
        pass
def test_gaierror(self):
    # Ensure a getaddrinfo failure ends up in stream.error. Triggering a
    # real gaierror reliably is difficult (some resolvers won't even
    # error on malformed names), so socket.connect is mocked instead.
    # If IOStream ever consults a Resolver before sock.connect, the mock
    # target must move accordingly.
    raw_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    stream = IOStream(raw_sock, io_loop=self.io_loop)
    stream.set_close_callback(self.stop)
    with mock.patch('socket.socket.connect',
                    side_effect=socket.gaierror('boom')):
        with ExpectLog(gen_log, "Connect error"):
            stream.connect(('localhost', 80), callback=self.stop)
            self.wait()
    self.assertIsInstance(stream.error, socket.gaierror)
def test_connection_refused(self: typing.Any):
    # A refused connection must close the stream (raising
    # StreamClosedError with a ConnectionRefusedError recorded) and never
    # run any connect callback. (The kqueue IOLoop used to behave
    # differently from the epoll IOLoop in this respect.)
    cleanup_func, port = refusing_port()
    self.addCleanup(cleanup_func)
    stream = IOStream(socket.socket())
    stream.set_close_callback(self.stop)
    # Log messages vary by platform and ioloop implementation.
    with ExpectLog(gen_log, ".*", required=False):
        with self.assertRaises(StreamClosedError):
            yield stream.connect(("127.0.0.1", port))
    self.assertTrue(isinstance(stream.error, ConnectionRefusedError), stream.error)
def test_gaierror(self):
    # Ensure a getaddrinfo failure ends up in stream.error. Triggering a
    # real gaierror reliably is difficult (some resolvers won't even
    # error on malformed names), so socket.connect is mocked instead.
    # If IOStream ever consults a Resolver before sock.connect, the mock
    # target must move accordingly.
    raw_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    stream = IOStream(raw_sock)
    stream.set_close_callback(self.stop)
    fake_error = socket.gaierror(errno.EIO, 'boom')
    with mock.patch('socket.socket.connect', side_effect=fake_error):
        with ExpectLog(gen_log, "Connect error"):
            stream.connect(('localhost', 80), callback=self.stop)
            self.wait()
    self.assertIsInstance(stream.error, socket.gaierror)
class RemoteUpstream(Upstream):
    """Upstream whose peer is a remote host.

    Most methods mirror LocalUpstream, but the two may diverge later.
    """

    def initialize(self):
        self.socket = socket.socket(self._address_type, socket.SOCK_STREAM)
        self.stream = IOStream(self.socket)
        self.stream.set_close_callback(self.on_close)

    def do_connect(self):
        self.stream.connect(self.dest, self.on_connect)

    @property
    def address(self):
        return self.socket.getsockname()

    @property
    def address_type(self):
        return self._address_type

    def on_connect(self):
        self.connection_callback(self)
        finish_handler = functools.partial(self.on_streaming_data, finished=True)
        self.stream.read_until_close(finish_handler, self.on_streaming_data)

    def on_close(self):
        # Distinguish an errored close from an orderly one.
        if self.stream.error:
            self.error_callback(self, self.stream.error)
        else:
            self.close_callback(self)

    def on_streaming_data(self, data, finished=False):
        if data:
            self.streaming_callback(self, data, finished)

    def do_write(self, data):
        try:
            self.stream.write(data)
        except IOError:
            self.close()

    def do_close(self):
        if self.socket:
            logger.debug("close upstream: %s" % self.socket)
            self.stream.close()
def handle_connection(self, connection, address):
    """Wrap an accepted IPC `connection` in an IOStream created under this
    server's io_loop, track it in self.streams, and untrack it on close."""
    log.trace('IPCServer: Handling connection to address: %s', address)
    try:
        kwargs = {}
        if self.opts['ipc_write_buffer'] > 0:
            kwargs['max_write_buffer_size'] = self.opts['ipc_write_buffer']
            log.trace('Setting IPC connection write buffer: %s',
                      (self.opts['ipc_write_buffer']))
        # BUG FIX: "salt.utils. async" is a syntax error on Python 3.7+
        # ("async" is a reserved keyword); the module was renamed to
        # salt.utils.asynchronous.
        with salt.utils.asynchronous.current_ioloop(self.io_loop):
            stream = IOStream(connection, **kwargs)
        self.streams.add(stream)

        def discard_after_closed():
            self.streams.discard(stream)

        stream.set_close_callback(discard_after_closed)
    except Exception as exc:
        # BUG FIX: the try block had no except clause (a syntax error);
        # mirror the error handling of the sibling implementation.
        log.error('IPC streaming error: %s', exc)
def connect():
    """Open an IOStream to the rtsp netcam (with or without credentials on
    this attempt) and arm the client timeout."""
    if send_auth[0]:
        logging.debug('testing rtsp netcam at %s (this time with credentials)' % url)
    else:
        logging.debug('testing rtsp netcam at %s' % url)
    raw_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    raw_sock.settimeout(settings.MJPG_CLIENT_TIMEOUT)
    stream = IOStream(raw_sock)
    stream.set_close_callback(on_close)
    stream.connect((host, int(port)), on_connect)
    # If the connect does not complete in time, fire on_connect with
    # _timeout=True so the caller can report the failure.
    timeout[0] = io_loop.add_timeout(
        datetime.timedelta(seconds=settings.MJPG_CLIENT_TIMEOUT),
        functools.partial(on_connect, _timeout=True))
    return stream
def _connect(self):
    """Connect to the relay host configured in the environment and start
    both receive loops once the stream is established."""
    host = self._env.config.host
    port = self._env.config.port
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    stream = IOStream(sock)

    def callback():
        # BUG FIX: parenthesized print works on Python 2 and 3;
        # the old statement form was Python-2-only.
        print('connect ok, begin to recv data')
        self._recvData(stream)
        self._recvFromRelay(stream)

    def closeCallback():
        print('connection was closed!')

    stream.set_close_callback(closeCallback)
    stream.connect((host, port), callback)
def test_connection_refused(self):
    # When a connection is refused, the connect callback should not
    # be run. (The kqueue IOLoop used to behave differently from the
    # epoll IOLoop in this respect)
    port = get_unused_port()
    stream = IOStream(socket.socket(), self.io_loop)
    self.connect_called = False

    def connect_callback():
        self.connect_called = True

    stream.set_close_callback(self.stop)
    stream.connect(("localhost", port), connect_callback)
    self.wait()
    self.assertFalse(self.connect_called)
    self.assertTrue(isinstance(stream.error, socket.error), stream.error)
    if sys.platform != 'cygwin':
        # cygwin's errnos don't match those used on native windows python.
        # FIX: native Windows reports the refusal as WSAECONNREFUSED, not
        # ECONNREFUSED — accept either, consistent with the sibling tests.
        _ERRNO_CONNREFUSED = (errno.ECONNREFUSED,)
        if hasattr(errno, "WSAECONNREFUSED"):
            _ERRNO_CONNREFUSED += (errno.WSAECONNREFUSED,)
        self.assertTrue(stream.error.args[0] in _ERRNO_CONNREFUSED)
def handle_connection(self, connection, address):
    """Adopt an accepted IPC connection as a tracked IOStream, applying
    the configured write-buffer cap when one is set."""
    log.trace('IPCServer: Handling connection to address: %s', address)
    try:
        if self.opts['ipc_write_buffer'] > 0:
            log.trace('Setting IPC connection write buffer: %s',
                      (self.opts['ipc_write_buffer']))
            stream = IOStream(
                connection,
                io_loop=self.io_loop,
                max_write_buffer_size=self.opts['ipc_write_buffer'])
        else:
            stream = IOStream(connection, io_loop=self.io_loop)
        self.streams.add(stream)

        def discard_after_closed():
            # Drop our reference once the peer goes away.
            self.streams.discard(stream)

        stream.set_close_callback(discard_after_closed)
    except Exception as exc:
        log.error('IPC streaming error: %s', exc)
class Client(object):
    """Stupid simple client"""

    def __init__(self, database=0, ioloop=None):
        self._ioloop = ioloop or IOLoop.instance()
        self._database = database
        self._stream = None

    def connect(self, callback=None, host="localhost", port=6379):
        """Open an asynchronous connection to the Redis server."""
        raw_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        self._socket = raw_sock
        self._stream = IOStream(raw_sock, io_loop=self._ioloop)
        self._stream.set_close_callback(self._close)
        self._stream.connect((host, port), callback=callback)

    def disconnect(self):
        """Drop the connection to Redis."""
        self._stream.close()

    def _close(self):
        """Close hook -- subclasses may override to react to disconnects."""
        pass

    def __getattr__(self, attr):
        """Turn attribute access into a Redis Command object."""
        cmd = attr.upper()
        if cmd not in VALID_COMMANDS:
            raise AttributeError("Invalid command %s" % cmd)
        if not self._stream:
            raise Exception("Cannot call command before connecting.")
        return Command(cmd, self)

    def send_message(self, message, response_class, callback):
        """Write `message` to Redis and hand the reply to response_class."""
        self._stream.write(message, self.write_callback)
        response_class(self._stream, callback)

    def write_callback(self, *args, **kwargs):
        """Write-complete hook -- subclasses may override if required."""
        pass
def test_connection_refused(self):
    # A refused connection raises StreamClosedError from connect() and
    # must not run any connect callback. (The kqueue IOLoop used to
    # behave differently from the epoll IOLoop in this respect.)
    cleanup_func, port = refusing_port()
    self.addCleanup(cleanup_func)
    stream = IOStream(socket.socket())
    stream.set_close_callback(self.stop)
    # Log messages vary by platform and ioloop implementation.
    with ExpectLog(gen_log, ".*", required=False):
        with self.assertRaises(StreamClosedError):
            yield stream.connect(("127.0.0.1", port))
    self.assertTrue(isinstance(stream.error, socket.error), stream.error)
    if sys.platform == 'cygwin':
        # cygwin's errnos don't match those used on native windows python.
        return
    refused = [errno.ECONNREFUSED]
    if hasattr(errno, "WSAECONNREFUSED"):
        refused.append(errno.WSAECONNREFUSED)
    self.assertIn(stream.error.args[0], refused)
def maybe_connect(self, to_pid, callback=None):
    """Synchronously open a connection to to_pid or return a connection if it exists."""
    callback = stack_context.wrap(callback or (lambda stream: None))

    def streaming_callback(data):
        # We are not guaranteed an acknowledgment; log and discard any
        # bytes that do arrive.
        log.info('Received %d bytes from %s, discarding.' % (len(data), to_pid))

    def on_connect(exit_cb, stream):
        log.info('Connection to %s established' % to_pid)
        self._connections[to_pid] = stream
        callback(stream)
        self.loop.add_callback(stream.read_until_close, exit_cb,
                               streaming_callback=streaming_callback)

    existing = self._connections.get(to_pid)
    if existing is not None:
        callback(existing)
        return

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    if not sock:
        raise self.SocketError("Failed opening socket")
    # Set the socket non-blocking
    sock.setblocking(0)

    stream = IOStream(sock, io_loop=self.loop)
    stream.set_nodelay(True)
    stream.set_close_callback(
        partial(self.__on_exit, to_pid, b'closed from maybe_connect'))
    connect_callback = partial(on_connect, partial(self.__on_exit, to_pid), stream)

    log.info('Establishing connection to %s' % to_pid)
    stream.connect((to_pid.ip, to_pid.port), callback=connect_callback)
    if stream.closed():
        raise self.SocketError("Failed to initiate stream connection")
    log.info('Maybe connected to %s' % to_pid)
class TCPLinkClientManager(object):
    """Creates outbound TCP links on demand over a Tornado IOStream."""

    def __init__(self, config):
        self.config = config
        self.stream = None
        self.creation_callback = None

    def setup(self):
        pass

    def create(self, callback):
        """Begin connecting; `callback` later receives the TCPLink (or None)."""
        self.creation_callback = callback
        raw_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        self.stream = IOStream(raw_sock)
        self.stream.set_close_callback(self.on_close)
        self.stream.connect((self.config['host'], self.config['port']),
                            self.on_connect)

    @tornado.gen.engine
    def on_connect(self):
        if self.stream:
            if self.creation_callback:
                # Hand ownership of the stream to the new link.
                link = TCPLink(self.stream)
                link.setup()
                yield tornado.gen.Task(link.establish)
                self.creation_callback(link)
                self.creation_callback = None
            self.stream = None

    def on_close(self):
        if self.stream:
            self.stream.set_close_callback(None)
            if self.stream.error:
                logging.error(self.stream.error)
            if self.creation_callback:
                # Report the failed creation to the waiting caller.
                self.creation_callback(None)
                self.creation_callback = None
            self.stream = None

    def cleanup(self):
        self.stream = None
class Client(Session):
    """Outbound TCP session with automatic reconnect on failure."""

    def __init__(self, protocol, io_loop=None):
        Session.__init__(self, protocol, io_loop)
        # Reconnect automatically when a connect attempt fails or times out.
        self.auto_reconnect = True
        # Seconds to wait before retrying a failed connection.
        self.reconnect_time = 5
        # Seconds allowed for a single connect attempt.
        self.connect_time = 5

    def connect(self, address):
        """Begin an asynchronous connection to ``address`` (host, port)."""
        self.status = SessionStream.CONNECTING
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        self.ios = IOStream(sock, self.io_loop)
        self.ios.set_close_callback(self._ios_closed)
        self.address = address
        self.clear_buffer()
        # Arm the connect timeout before starting the attempt.
        self.add_timer(self._connect_timeout, self.connect_time, "connect")
        self.ios.connect(address, self._connected)

    def _connected(self):
        # Connection established: notify the protocol, cancel the connect
        # timer, and start reading until the peer closes.
        self.status = SessionStream.CONNECTED
        self.protocol.connected(self)
        self.remove_timer("connect")
        self.ios.read_until_close(self._disconnected, self._receiver)

    def _connect_timeout(self):
        # Connect attempt took too long: close the stream and optionally
        # schedule a retry.
        self.remove_timer("connect")
        self.status = SessionStream.IDLE
        self.ios.close()
        if self.auto_reconnect:
            self.add_timer(self._do_reconnect, self.reconnect_time, "reconnect")

    def _ios_closed(self):
        # Stream closed while still connecting means the attempt failed
        # (e.g. connection refused); schedule a retry if enabled.
        if self.status == SessionStream.CONNECTING:
            self.remove_timer("connect")
            if self.auto_reconnect:
                self.add_timer(self._do_reconnect, self.reconnect_time, "reconnect")

    def _do_reconnect(self):
        # Timer fired: retry the last address.
        self.remove_timer("reconnect")
        self.connect(self.address)
def test_connection_refused(self):
    # When a connection is refused, the connect callback should not
    # be run. (The kqueue IOLoop used to behave differently from the
    # epoll IOLoop in this respect)
    server_socket, port = bind_unused_port()
    server_socket.close()
    stream = IOStream(socket.socket(), self.io_loop)
    self.connect_called = False

    def connect_callback():
        self.connect_called = True
    stream.set_close_callback(self.stop)
    stream.connect(("localhost", port), connect_callback)
    self.wait()
    self.assertFalse(self.connect_called)
    self.assertTrue(isinstance(stream.error, socket.error), stream.error)
    if sys.platform != "cygwin":
        # Accept the native-windows errno as well; WSAECONNREFUSED differs
        # from the POSIX value (matches the other copy of this test above).
        _ERRNO_CONNREFUSED = (errno.ECONNREFUSED,)
        if hasattr(errno, "WSAECONNREFUSED"):
            _ERRNO_CONNREFUSED += (errno.WSAECONNREFUSED,)
        # cygwin's errnos don't match those used on native windows python
        self.assertTrue(stream.error.args[0] in _ERRNO_CONNREFUSED)
class Connection(object):
    """Asynchronous Redis connection built on a tornado IOStream.

    Tracks a simple readiness protocol: a connection is "ready" when it
    holds no lock, has no outstanding read callbacks, and no queued
    ready-callbacks. Entering the connection as a context manager takes
    the lock; leaving releases it and resumes any pending work.
    """

    def __init__(self, host='localhost', port=6379, weak_event_handler=None,
                 stop_after=None, io_loop=None):
        self.host = host
        self.port = port
        if weak_event_handler:
            self._event_handler = weak_event_handler
        else:
            self._event_handler = None
        # stop_after doubles as the blocking-connect socket timeout.
        self.timeout = stop_after
        self._stream = None
        self._io_loop = io_loop
        self.in_progress = False
        self.read_callbacks = []
        self.ready_callbacks = deque()
        # Re-entrant lock counter; nonzero means the connection is busy.
        self._lock = 0
        self.info = {}

    def __del__(self):
        self.disconnect()

    def __enter__(self):
        self._lock += 1
        return self

    def __exit__(self, *args, **kwargs):
        self._lock -= 1
        if not self._lock:
            self.continue_pending()

    def continue_pending(self):
        # Continue with the pending command execution
        # if all read operations are completed.
        if not self.read_callbacks and self.ready_callbacks:
            # Pop a SINGLE callback from the queue and execute it.
            # The next one will be executed from the code
            # invoked by the callback
            callback = self.ready_callbacks.popleft()
            callback()

    def ready(self):
        """Return True when no lock, reads, or queued callbacks remain."""
        return (not self._lock and
                not self.read_callbacks and
                not self.ready_callbacks)

    def wait_until_ready(self, callback=None):
        """Run ``callback`` now if ready, otherwise queue it; returns self."""
        if callback:
            if not self.ready():
                self.ready_callbacks.append(callback)
            else:
                callback()
        return self

    def connect(self):
        """Open the TCP stream to Redis (no-op if already connected).

        :raises ConnectionError: if the blocking socket connect fails.
        """
        if not self._stream:
            try:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
                sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
                sock.settimeout(self.timeout)
                sock.connect((self.host, self.port))
                self._stream = IOStream(sock, io_loop=self._io_loop)
                self._stream.set_close_callback(self.on_stream_close)
            # FIX: use PEP 3110 "except ... as e" — the old "except E, e"
            # form is Python-2-only syntax and fails to parse on Python 3;
            # the "as" form works on Python 2.6+ as well.
            except socket.error as e:
                raise ConnectionError(str(e))
            self.fire_event('on_connect')
def connect(self):
    """Open the MySQL connection from inside a child greenlet.

    Uses a tornado IOStream for the TCP/unix-socket transport and
    greenlet switching to make the async connect look synchronous to
    the caller: the child greenlet parks in ``main.switch()`` until the
    connect future resolves, then continues with the MySQL handshake.

    :raises err.OperationalError: (2003) wrapping any failure; the
        original exception and traceback are attached to the error.
    """
    self._loop = IOLoop.current()
    try:
        # Prefer the unix socket only for local hosts.
        if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.host_info = "Localhost via UNIX socket"
            address = self.unix_socket
        else:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
            self.host_info = "socket %s:%d" % (self.host, self.port)
            address = (self.host, self.port)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # Wrap the raw socket; from here on "sock" is an IOStream.
        sock = IOStream(sock)
        sock.set_close_callback(self.stream_close_callback)
        child_gr = greenlet.getcurrent()
        main = child_gr.parent
        assert main is not None, "Execut must be running in child greenlet"
        if self.connect_timeout:
            def timeout():
                # Only force-close if the connect has not completed yet
                # (self.socket is set on success in connected()).
                if not self.socket:
                    sock.close((None, IOError("connect timeout"), None))
            self._loop.call_later(self.connect_timeout, timeout)

        def connected(future):
            # Resume the parked child greenlet: re-raise the connect
            # failure inside it, or record the stream and continue.
            if future._exc_info is not None:
                child_gr.throw(future.exception())
            else:
                self.socket = sock
                child_gr.switch()
        future = sock.connect(address)
        self._loop.add_future(future, connected)
        # Park here until connected() switches/throws back into us.
        main.switch()
        self._rfile = self.socket
        self._next_seq_id = 0
        self._get_server_information()
        self._request_authentication()
        if self.sql_mode is not None:
            c = self.cursor()
            c.execute("SET sql_mode=%s", (self.sql_mode,))
        if self.init_command is not None:
            c = self.cursor()
            c.execute(self.init_command)
            self.commit()
        if self.autocommit_mode is not None:
            self.autocommit(self.autocommit_mode)
    except Exception as e:
        # Clean up any half-open stream before reporting the failure.
        if self.socket:
            self._rfile = None
            self.socket.close()
            self.socket = None
        exc = err.OperationalError(
            2003,
            "Can't connect to MySQL server on %s (%r)" % (
                self.unix_socket or ("%s:%s" % (self.host, self.port)), e))
        # Keep original exception and traceback to investigate error.
        exc.original_exception = e
        exc.traceback = traceback.format_exc()
        raise exc
class _RedisConnection(object):
    """Asynchronous Redis connection that pipelines commands and matches
    replies to queued futures via a command-context deque."""

    def __init__(self, io_loop, write_buf, final_callback, redis_tuple, redis_pass):
        """
        :param io_loop: the tornado io_loop to run on
        :param write_buf: first payload to write after connecting
        :param final_callback: invoked when a response is assigned
        :param redis_tuple: (ip, port, db)
        :param redis_pass: redis password, or None
        """
        self.__io_loop = io_loop
        self.__final_cb = final_callback
        self.__stream = None
        # Remainder of a partially parsed redis reply.
        self.__recv_buf = ''
        self.__write_buf = write_buf
        init_buf = ''
        # Always issue SELECT <db> first; prepend AUTH when a password is set.
        init_buf = chain_select_cmd(redis_tuple[2], init_buf)
        if redis_pass is None:
            self.__init_buf = (init_buf, )
        else:
            assert redis_pass and isinstance(redis_pass, str)
            self.__init_buf = (redis_auth(redis_pass), init_buf)
        self.__haspass = redis_pass is not None
        self.__init_buf = ''.join(self.__init_buf)
        self.__connect_state = CONNECT_INIT
        # Redis command context: connect-command count (AUTH, SELECT .etc),
        # transaction flag, and command count per queued request.
        self.__cmd_env = deque()
        # NOTE(review): __written appears unused within this class — verify.
        self.__written = False

    def connect(self, init_future, redis_tuple, active_trans, cmd_count):
        """Start the TCP connection (idempotent).

        :param init_future: the first future object
        :param redis_tuple: (ip, port, db)
        :param active_trans: whether a transaction is active
        :param cmd_count: number of commands in the first request
        """
        if self.__stream is not None:
            return
        # future, connect_count, transaction, cmd_count
        self.__cmd_env.append((init_future, 1 + int(self.__haspass), False, 0))
        self.__cmd_env.append((init_future, 0, active_trans, cmd_count))
        with ExceptionStackContext(self.__handle_ex):
            self.__stream = IOStream(socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0),
                                     io_loop=self.__io_loop)
            self.__stream.set_close_callback(self.__on_close)
            self.__stream.connect(redis_tuple[:2], self.__on_connect)
            self.__connect_state = CONNECT_ING

    def connect_state(self):
        # Expose the current connection state to callers.
        return self.__connect_state

    def write(self, write_buf, new_future, include_select, active_trans, cmd_count, by_connect=False):
        """Queue/send a request buffer.

        :param new_future: closures in the resp callback hold the previous
            future object, so it must be refreshed here
        :param include_select: whether a SELECT command is included
        :param active_trans: whether a transaction is active
        :param cmd_count: number of commands
        :param by_connect: True when called from __on_connect to flush the
            initial AUTH/SELECT payload plus anything buffered meanwhile
        """
        if by_connect:
            self.__stream.write(self.__init_buf)
            self.__init_buf = None
            if self.__write_buf:
                self.__stream.write(self.__write_buf)
                self.__write_buf = None
            return
        self.__cmd_env.append(
            (new_future, int(include_select), active_trans, cmd_count))
        # While still connecting, accumulate output for later flush.
        if self.__connect_state == CONNECT_ING:
            self.__write_buf = ''.join((self.__write_buf, write_buf))
            return
        if self.__write_buf:
            write_buf = ''.join((self.__write_buf, write_buf))
        self.__stream.write(write_buf)
        self.__write_buf = None

    def __on_connect(self):
        """Connected: only the initial commands need to be sent."""
        self.__connect_state = CONNECT_SUCC
        self.__stream.set_nodelay(True)
        self.write(None, None, None, None, None, True)
        self.__stream.read_until_close(None, self.__on_resp)

    def __on_resp(self, recv):
        """Parse as many complete replies as the buffer allows.

        :param recv: the received buffer chunk
        """
        recv = ''.join((self.__recv_buf, recv))
        idx = 0
        for future, connect_count, trans, count in self.__cmd_env:
            ok, payload, recv = decode_resp_ondemand(recv, connect_count,
                                                     trans, count)
            if not ok:
                # Incomplete reply: wait for more bytes.
                break
            idx += 1
            # Only user requests (count > 0) produce a callback; the
            # AUTH/SELECT bootstrap entries are consumed silently.
            if count > 0:
                self.__run_callback({
                    _RESP_FUTURE: future, RESP_RESULT: payload
                })
        self.__recv_buf = recv
        # Drop the contexts whose replies were fully decoded.
        for _ in xrange(idx):
            self.__cmd_env.popleft()

    def __on_close(self):
        self.__connect_state = CONNECT_INIT
        if self.__final_cb:
            if self.__stream.error:
                self.__run_callback({RESP_ERR: self.__stream.error})

    def __run_callback(self, resp):
        if self.__final_cb is None:
            return
        # Dispatch on the io_loop to avoid re-entrancy into stream callbacks.
        self.__io_loop.add_callback(self.__final_cb, resp)

    def __handle_ex(self, typ, value, tb):
        """Stack-context exception handler.

        :param typ: the exception type
        """
        if self.__final_cb:
            self.__run_callback({RESP_ERR: value})
            return True
        return False
class Connection(RedisCommandsMixin):
    """Shared Redis connection using hiredis for reply parsing.

    Replies are matched to callers in FIFO order via ``self.callbacks``.
    Connections live in ``redis._shared`` while shareable; ``lock``/``unlock``
    remove/return them for exclusive use (WATCH/MULTI).
    """

    def __init__(self, redis, on_connect=None):
        logger.debug('Creating new Redis connection.')
        self.redis = redis
        self.reader = hiredis.Reader()
        self._watch = set()
        self._multi = False
        # FIFO of reply consumers, one per in-flight command.
        self.callbacks = deque()
        self._on_connect_callback = on_connect
        self.stream = IOStream(
            socket.socket(redis._family, socket.SOCK_STREAM, 0),
            io_loop=redis._ioloop
        )
        self.stream.set_close_callback(self._on_close)
        self.stream.connect(redis._addr, self._on_connect)

    def _on_connect(self):
        logger.debug('Connected!')
        self.stream.read_until_close(self._on_close, self._on_read)
        self.redis._shared.append(self)
        if self._on_connect_callback is not None:
            self._on_connect_callback(self)
            self._on_connect_callback = None

    def _on_read(self, data):
        # Feed raw bytes to hiredis and drain every complete reply,
        # dispatching each to the oldest waiting callback.
        self.reader.feed(data)
        while True:
            resp = self.reader.gets()
            if resp is False:
                break
            callback = self.callbacks.popleft()
            if callback is not None:
                self.redis._ioloop.add_callback(partial(callback, resp))

    def is_idle(self):
        """True when no replies are outstanding."""
        return len(self.callbacks) == 0

    def is_shared(self):
        """True while this connection sits in the shared pool."""
        return self in self.redis._shared

    def lock(self):
        """Take the connection out of the shared pool for exclusive use."""
        if not self.is_shared():
            raise Exception('Connection already is locked!')
        self.redis._shared.remove(self)

    def unlock(self, callback=None):
        """Reset transaction state and return the connection to the pool."""
        def cb(resp):
            assert resp == 'OK'
            self.redis._shared.append(self)

        if self._multi:
            self.send_message(['DISCARD'])
        elif self._watch:
            self.send_message(['UNWATCH'])
        self.send_message(['SELECT', self.redis._database], cb)

    def send_message(self, args, callback=None):
        """Send one command; returns a Future resolved with the reply."""
        command = args[0]

        if 'SUBSCRIBE' in command:
            raise NotImplementedError('Not yet.')

        # Do not allow the commands, affecting the execution of other commands,
        # to be used on shared connection.
        if command in ('WATCH', 'MULTI'):
            if self.is_shared():
                raise Exception('Command %s is not allowed while connection '
                                'is shared!' % command)
            if command == 'WATCH':
                self._watch.add(args[1])
            if command == 'MULTI':
                self._multi = True

        # monitor transaction state, to unlock correctly
        if command in ('EXEC', 'DISCARD', 'UNWATCH'):
            if command in ('EXEC', 'DISCARD'):
                self._multi = False
            self._watch.clear()

        self.stream.write(self.format_message(args))

        future = Future()
        if callback is not None:
            future.add_done_callback(stack_context.wrap(callback))
        self.callbacks.append(future.set_result)
        return future

    def format_message(self, args):
        """Encode ``args`` as a RESP multi-bulk request (bytes)."""
        header = "*%d" % len(args)
        lines = [header.encode('utf-8')]
        for arg in args:
            if not isinstance(arg, str):
                arg = str(arg)
            arg = arg.encode('utf-8')
            # $<len> prefix counts encoded bytes, not characters.
            header = "$%d" % len(arg)
            lines.append(header.encode('utf-8'))
            lines.append(arg)
        lines.append(b"")
        return b"\r\n".join(lines)

    def close(self):
        # FIX: was self.send_command(...), but no send_command exists in this
        # class — the sender method is send_message.
        self.send_message(['QUIT'])
        if self.is_shared():
            self.lock()

    def _on_close(self, data=None):
        logger.debug('Redis connection was closed.')
        if data is not None:
            self._on_read(data)
        if self.is_shared():
            self.lock()
class Connection(FrameHandler):
    """A "physical" TCP connection to the AMQP server

    heartbeat: int, optional
        the requested time interval in seconds for heartbeat frames.

    Connection.on_error callback, when set, is called in case of "hard"
    AMQP Error. It receives a ConnectionErrorinstance as argument:

        def handle_error(conn_error):
            print conn_error.method
            print conn_error.reply_code

        conn.on_error = handle_error

    Connection.on_disconnect callback, when set, is called in case of
    heartbeat timeout or TCP low level disconnection. It receives no args.
    """

    def __init__(self, host, username='******', password='******', vhost='/',
                 port=5672, heartbeat=0, io_loop=None):
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self.vhost = vhost
        self.heartbeat = heartbeat
        # Timestamp of the last frame received; used for heartbeat tracking.
        self.last_received_frame = None
        self.frame_max = 0
        self.io_loop = io_loop or IOLoop.instance()
        self.stream = None
        # Channel 0 is the connection-level channel, handled by self.
        self.channels = {0: self}
        self.last_channel_id = 0
        self.channel_id = 0
        self.on_connect = None
        self.on_disconnect = None
        self.on_error = None
        self._close_callback = None
        # Counts frames read back-to-back; see _frame_loop for why.
        self._frame_count = 0
        super(Connection, self).__init__(connection=self)

    def connect(self, callback, close_callback=None):
        """open the connection to the server"""
        if self.status is not status.CLOSED:
            raise AmqpStatusError('Connection status is %s' % self.status)
        self.status = status.OPENING
        sock = socket.socket()
        sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        self.on_connect = callback
        self.on_disconnect = close_callback
        if TORNADO_1_2:
            # Tornado >= 1.2 supports async connect on the IOStream itself.
            self.stream = IOStream(sock, io_loop=self.io_loop)
            self.stream.set_close_callback(self.on_closed_stream)
            self.stream.connect((self.host, self.port), self._handshake)
        else:
            # Older tornado: connect the raw socket synchronously first.
            sock.connect((self.host, self.port))
            self.stream = IOStream(sock, io_loop=self.io_loop)
            self.stream.set_close_callback(self.on_closed_stream)
            self._handshake()

    def close(self, callback=None):
        """cleanly closes the connection to the server.

        all pending tasks are flushed before connection shutdown"""
        if self.status not in (status.OPENING, status.OPENED):
            raise AmqpStatusError('connection is not open')
        self._close_callback = callback
        self.status = status.CLOSING
        # Ask every open channel to close, re-entering close() as each
        # completes.
        for ch in self.channels.values():
            if ch is not self and ch.status in (status.OPENING, status.OPENED):
                ch.close(self.close)
        m = Close(reply_code=0, reply_text='', class_id=0, method_id=0)
        self.send_method(m, self._close_callback)

    def _get_next_channel_id(self):
        # Channel ids wrap at 0x10000; find the next free slot.
        if len(self.channels) == 0x10000:
            raise AmqpError('max channels per connection exceeded')
        next_id = self.last_channel_id
        while True:
            next_id = (next_id + 1) % 0x10000
            if next_id not in self.channels:
                break
        self.last_channel_id = next_id
        return next_id

    def channel(self, callback=None):
        """get a Channel instance"""
        if self.status == status.OPENED:
            ch_id = self._get_next_channel_id()
            ch = Channel(channel_id=ch_id, conn=self)
            self.channels[ch_id] = ch
            ch._open(callback)
            return ch
        else:
            raise AmqpStatusError('connection is not opened')

    def _handshake(self):
        # AMQP 0-9-1 protocol header, then start the frame read loop.
        self.stream.write('AMQP\x00\x00\x09\x01')
        FrameReader(self.stream, self._frame_loop)

    def _frame_loop(self, frame):
        if self.heartbeat:
            self.last_received_frame = time.time()
        self.channels[frame.channel].process_frame(frame)
        self._frame_count += 1
        if self.stream:
            # Every 5 frames ioloop gets the control back in order
            # to avoid hitting the recursion limit
            # reading one frame cost 13 levels of stack recursion
            # TODO check if always using _callbacks is faster that frame
            # counting
            if self._frame_count == 5:
                self._frame_count = 0
                cb = lambda: FrameReader(self.stream, self._frame_loop)
                self._add_ioloop_callback(cb)
            else:
                FrameReader(self.stream, self._frame_loop)

    # The internal _callbacks container changed type between tornado
    # versions (list vs set), hence the two variants below.
    if TORNADO_1_2:
        def _add_ioloop_callback(self, callback):
            self.io_loop._callbacks.append(callback)
    else:
        def _add_ioloop_callback(self, callback):
            self.io_loop._callbacks.add(callback)

    def close_stream(self):
        # Idempotent teardown of the underlying stream.
        if self.stream is None:
            return
        try:
            self.stream.close()
        finally:
            self.stream = None

    def on_closed_stream(self):
        # Low-level disconnect: flip to CLOSED once and notify the user
        # callback, shielding the io_loop from callback errors.
        if self.status == status.CLOSED:
            return
        self.status = status.CLOSED
        if self.on_disconnect:
            try:
                self.on_disconnect()
            except Exception:
                logger.error('ERROR in on_disconnect() callback',
                             exc_info=True)

    def reset(self):
        # Reset every child channel, then this connection, then drop the
        # stream.
        for c in self.channels.values():
            if c is not self:
                c.reset()
        super(Connection, self).reset()
        self.close_stream()
class _HTTPConnection(object):
    """Drives a single HTTP request/response over an IOStream: resolve,
    connect, send request, parse headers/body (including chunked and gzip),
    and follow redirects."""

    _SUPPORTED_METHODS = set(
        ["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"])

    def __init__(self, io_loop, client, request, release_callback,
                 final_callback, max_buffer_size, resolver):
        self.start_time = io_loop.time()
        self.io_loop = io_loop
        self.client = client
        self.request = request
        self.release_callback = release_callback
        self.final_callback = final_callback
        self.max_buffer_size = max_buffer_size
        self.resolver = resolver
        self.code = None
        self.headers = None
        self.chunks = None
        self._decompressor = None
        # Timeout handle returned by IOLoop.add_timeout
        self._timeout = None
        # Any exception escaping the async chain is funneled into
        # _handle_exception so the caller gets a 599 response.
        with stack_context.ExceptionStackContext(self._handle_exception):
            self.parsed = urlparse.urlsplit(_unicode(self.request.url))
            if self.parsed.scheme not in ("http", "https"):
                raise ValueError("Unsupported url scheme: %s" %
                                 self.request.url)
            # urlsplit results have hostname and port results, but they
            # didn't support ipv6 literals until python 2.7.
            netloc = self.parsed.netloc
            if "@" in netloc:
                userpass, _, netloc = netloc.rpartition("@")
            match = re.match(r'^(.+):(\d+)$', netloc)
            if match:
                host = match.group(1)
                port = int(match.group(2))
            else:
                host = netloc
                port = 443 if self.parsed.scheme == "https" else 80
            if re.match(r'^\[.*\]$', host):
                # raw ipv6 addresses in urls are enclosed in brackets
                host = host[1:-1]
            self.parsed_hostname = host  # save final host for _on_connect
            if request.allow_ipv6:
                af = socket.AF_UNSPEC
            else:
                # We only try the first IP we get from getaddrinfo,
                # so restrict to ipv4 by default.
                af = socket.AF_INET
            self.resolver.resolve(host, port, af, callback=self._on_resolve)

    def _on_resolve(self, addrinfo):
        # DNS resolved: build the (possibly SSL-wrapped) stream and connect.
        af, sockaddr = addrinfo[0]
        if self.parsed.scheme == "https":
            ssl_options = {}
            if self.request.validate_cert:
                ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
            if self.request.ca_certs is not None:
                ssl_options["ca_certs"] = self.request.ca_certs
            else:
                ssl_options["ca_certs"] = _DEFAULT_CA_CERTS
            if self.request.client_key is not None:
                ssl_options["keyfile"] = self.request.client_key
            if self.request.client_cert is not None:
                ssl_options["certfile"] = self.request.client_cert
            # SSL interoperability is tricky.  We want to disable
            # SSLv2 for security reasons; it wasn't disabled by default
            # until openssl 1.0.  The best way to do this is to use
            # the SSL_OP_NO_SSLv2, but that wasn't exposed to python
            # until 3.2.  Python 2.7 adds the ciphers argument, which
            # can also be used to disable SSLv2.  As a last resort
            # on python 2.6, we set ssl_version to SSLv3.  This is
            # more narrow than we'd like since it also breaks
            # compatibility with servers configured for TLSv1 only,
            # but nearly all servers support SSLv3:
            # http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html
            if sys.version_info >= (2, 7):
                ssl_options["ciphers"] = "DEFAULT:!SSLv2"
            else:
                # This is really only necessary for pre-1.0 versions
                # of openssl, but python 2.6 doesn't expose version
                # information.
                ssl_options["ssl_version"] = ssl.PROTOCOL_SSLv3
            self.stream = SSLIOStream(socket.socket(af),
                                      io_loop=self.io_loop,
                                      ssl_options=ssl_options,
                                      max_buffer_size=self.max_buffer_size)
        else:
            self.stream = IOStream(socket.socket(af),
                                   io_loop=self.io_loop,
                                   max_buffer_size=self.max_buffer_size)
        timeout = min(self.request.connect_timeout,
                      self.request.request_timeout)
        if timeout:
            self._timeout = self.io_loop.add_timeout(
                self.start_time + timeout,
                stack_context.wrap(self._on_timeout))
        self.stream.set_close_callback(self._on_close)
        # ipv6 addresses are broken (in self.parsed.hostname) until
        # 2.7, here is correctly parsed value calculated in __init__
        self.stream.connect(sockaddr, self._on_connect,
                            server_hostname=self.parsed_hostname)

    def _on_timeout(self):
        # Raised inside the ExceptionStackContext, so this becomes a 599
        # response via _handle_exception.
        self._timeout = None
        if self.final_callback is not None:
            raise HTTPError(599, "Timeout")

    def _remove_timeout(self):
        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

    def _on_connect(self):
        # Connected: switch from connect timeout to request timeout, build
        # the request line + headers, and send the request.
        self._remove_timeout()
        if self.request.request_timeout:
            self._timeout = self.io_loop.add_timeout(
                self.start_time + self.request.request_timeout,
                stack_context.wrap(self._on_timeout))
        if (self.request.method not in self._SUPPORTED_METHODS and
                not self.request.allow_nonstandard_methods):
            raise KeyError("unknown method %s" % self.request.method)
        for key in ('network_interface', 'proxy_host', 'proxy_port',
                    'proxy_username', 'proxy_password'):
            if getattr(self.request, key, None):
                raise NotImplementedError('%s not supported' % key)
        if "Connection" not in self.request.headers:
            self.request.headers["Connection"] = "close"
        if "Host" not in self.request.headers:
            if '@' in self.parsed.netloc:
                # Strip userinfo from the Host header.
                self.request.headers["Host"] = self.parsed.netloc.rpartition(
                    '@')[-1]
            else:
                self.request.headers["Host"] = self.parsed.netloc
        username, password = None, None
        if self.parsed.username is not None:
            username, password = self.parsed.username, self.parsed.password
        elif self.request.auth_username is not None:
            username = self.request.auth_username
            password = self.request.auth_password or ''
        if username is not None:
            auth = utf8(username) + b":" + utf8(password)
            self.request.headers["Authorization"] = (b"Basic " +
                                                     base64.b64encode(auth))
        if self.request.user_agent:
            self.request.headers["User-Agent"] = self.request.user_agent
        if not self.request.allow_nonstandard_methods:
            # Body is required for POST/PATCH/PUT and forbidden otherwise.
            if self.request.method in ("POST", "PATCH", "PUT"):
                assert self.request.body is not None
            else:
                assert self.request.body is None
        if self.request.body is not None:
            self.request.headers["Content-Length"] = str(len(
                self.request.body))
        if (self.request.method == "POST" and
                "Content-Type" not in self.request.headers):
            self.request.headers[
                "Content-Type"] = "application/x-www-form-urlencoded"
        if self.request.use_gzip:
            self.request.headers["Accept-Encoding"] = "gzip"
        req_path = ((self.parsed.path or '/') +
                    (('?' + self.parsed.query) if self.parsed.query else ''))
        request_lines = [
            utf8("%s %s HTTP/1.1" % (self.request.method, req_path))
        ]
        for k, v in self.request.headers.get_all():
            line = utf8(k) + b": " + utf8(v)
            if b'\n' in line:
                # Reject header injection.
                raise ValueError('Newline in header: ' + repr(line))
            request_lines.append(line)
        self.stream.write(b"\r\n".join(request_lines) + b"\r\n\r\n")
        if self.request.body is not None:
            self.stream.write(self.request.body)
        self.stream.read_until_regex(b"\r?\n\r?\n", self._on_headers)

    def _release(self):
        # Return this connection slot to the client exactly once.
        if self.release_callback is not None:
            release_callback = self.release_callback
            self.release_callback = None
            release_callback()

    def _run_callback(self, response):
        # Deliver the final response exactly once, on the io_loop.
        self._release()
        if self.final_callback is not None:
            final_callback = self.final_callback
            self.final_callback = None
            self.io_loop.add_callback(final_callback, response)

    def _handle_exception(self, typ, value, tb):
        if self.final_callback:
            self._remove_timeout()
            gen_log.warning("uncaught exception", exc_info=(typ, value, tb))
            self._run_callback(
                HTTPResponse(
                    self.request, 599, error=value,
                    request_time=self.io_loop.time() - self.start_time,
                ))
            if hasattr(self, "stream"):
                self.stream.close()
            return True
        else:
            # If our callback has already been called, we are probably
            # catching an exception that is not caused by us but rather
            # some child of our callback. Rather than drop it on the floor,
            # pass it along.
            return False

    def _on_close(self):
        # Unexpected close before the response completed → 599.
        if self.final_callback is not None:
            message = "Connection closed"
            if self.stream.error:
                message = str(self.stream.error)
            raise HTTPError(599, message)

    def _handle_1xx(self, code):
        # Informational response: discard it and wait for the real headers.
        self.stream.read_until_regex(b"\r?\n\r?\n", self._on_headers)

    def _on_headers(self, data):
        data = native_str(data.decode("latin1"))
        first_line, _, header_data = data.partition("\n")
        match = re.match("HTTP/1.[01] ([0-9]+) ([^\r]*)", first_line)
        assert match
        code = int(match.group(1))
        self.headers = HTTPHeaders.parse(header_data)
        if 100 <= code < 200:
            self._handle_1xx(code)
            return
        else:
            self.code = code
            self.reason = match.group(2)
        if "Content-Length" in self.headers:
            if "," in self.headers["Content-Length"]:
                # Proxies sometimes cause Content-Length headers to get
                # duplicated.  If all the values are identical then we can
                # use them but if they differ it's an error.
                pieces = re.split(r',\s*', self.headers["Content-Length"])
                if any(i != pieces[0] for i in pieces):
                    raise ValueError("Multiple unequal Content-Lengths: %r" %
                                     self.headers["Content-Length"])
                self.headers["Content-Length"] = pieces[0]
            content_length = int(self.headers["Content-Length"])
        else:
            content_length = None
        if self.request.header_callback is not None:
            # re-attach the newline we split on earlier
            self.request.header_callback(first_line + _)
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))
            self.request.header_callback('\r\n')
        if self.request.method == "HEAD" or self.code == 304:
            # HEAD requests and 304 responses never have content, even
            # though they may have content-length headers
            self._on_body(b"")
            return
        if 100 <= self.code < 200 or self.code == 204:
            # These response codes never have bodies
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            if ("Transfer-Encoding" in self.headers or
                    content_length not in (None, 0)):
                raise ValueError("Response with code %d should not have body" %
                                 self.code)
            self._on_body(b"")
            return
        if (self.request.use_gzip and
                self.headers.get("Content-Encoding") == "gzip"):
            self._decompressor = GzipDecompressor()
        # Choose the body-reading strategy: chunked, fixed length, or
        # read-until-close.
        if self.headers.get("Transfer-Encoding") == "chunked":
            self.chunks = []
            self.stream.read_until(b"\r\n", self._on_chunk_length)
        elif content_length is not None:
            self.stream.read_bytes(content_length, self._on_body)
        else:
            self.stream.read_until_close(self._on_body)

    def _on_body(self, data):
        self._remove_timeout()
        original_request = getattr(self.request, "original_request",
                                   self.request)
        if (self.request.follow_redirects and
                self.request.max_redirects > 0 and
                self.code in (301, 302, 303, 307)):
            assert isinstance(self.request, _RequestProxy)
            new_request = copy.copy(self.request.request)
            new_request.url = urlparse.urljoin(self.request.url,
                                               self.headers["Location"])
            new_request.max_redirects = self.request.max_redirects - 1
            del new_request.headers["Host"]
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
            # Client SHOULD make a GET request after a 303.
            # According to the spec, 302 should be followed by the same
            # method as the original request, but in practice browsers
            # treat 302 the same as 303, and many servers use 302 for
            # compatibility with pre-HTTP/1.1 user agents which don't
            # understand the 303 status.
            if self.code in (302, 303):
                new_request.method = "GET"
                new_request.body = None
                for h in [
                        "Content-Length", "Content-Type",
                        "Content-Encoding", "Transfer-Encoding"
                ]:
                    try:
                        del self.request.headers[h]
                    except KeyError:
                        pass
            new_request.original_request = original_request
            final_callback = self.final_callback
            self.final_callback = None
            self._release()
            self.client.fetch(new_request, final_callback)
            self.stream.close()
            return
        if self._decompressor:
            data = (self._decompressor.decompress(data) +
                    self._decompressor.flush())
        if self.request.streaming_callback:
            if self.chunks is None:
                # if chunks is not None, we already called streaming_callback
                # in _on_chunk_data
                self.request.streaming_callback(data)
            buffer = BytesIO()
        else:
            buffer = BytesIO(data)  # TODO: don't require one big string?
        response = HTTPResponse(original_request, self.code,
                                reason=self.reason,
                                headers=self.headers,
                                request_time=self.io_loop.time() -
                                self.start_time,
                                buffer=buffer,
                                effective_url=self.request.url)
        self._run_callback(response)
        self.stream.close()

    def _on_chunk_length(self, data):
        # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
        length = int(data.strip(), 16)
        if length == 0:
            # Final (zero-length) chunk: finish decompression and deliver.
            if self._decompressor is not None:
                tail = self._decompressor.flush()
                if tail:
                    # I believe the tail will always be empty (i.e.
                    # decompress will return all it can).  The purpose
                    # of the flush call is to detect errors such
                    # as truncated input.  But in case it ever returns
                    # anything, treat it as an extra chunk
                    if self.request.streaming_callback is not None:
                        self.request.streaming_callback(tail)
                    else:
                        self.chunks.append(tail)
                # all the data has been decompressed, so we don't need to
                # decompress again in _on_body
                self._decompressor = None
            self._on_body(b''.join(self.chunks))
        else:
            self.stream.read_bytes(
                length + 2,  # chunk ends with \r\n
                self._on_chunk_data)

    def _on_chunk_data(self, data):
        assert data[-2:] == b"\r\n"
        chunk = data[:-2]
        if self._decompressor:
            chunk = self._decompressor.decompress(chunk)
        if self.request.streaming_callback is not None:
            self.request.streaming_callback(chunk)
        else:
            self.chunks.append(chunk)
        # Loop back for the next chunk-size line.
        self.stream.read_until(b"\r\n", self._on_chunk_length)
class PushMessageReceiver(object):
    """Receives newline-delimited JSON push messages over a tornado IOStream
    and dispatches them to handlers registered per message type.

    Messages look like ``{"type": ..., "content": ...}``; the registered
    handler is called with the ``content`` value.
    """

    def __init__(self):
        self.connected = False
        self._stream = None
        self.address = None
        self.family = None
        self.delayed_call = None
        self._registered_handlers = {}

    def register_message_handler(self, message_type, handler):
        """Route messages of ``message_type`` to ``handler``."""
        self._registered_handlers[message_type] = handler

    def unregister_message_handler(self, message_type):
        """Remove the handler for ``message_type`` (no-op if absent)."""
        try:
            del self._registered_handlers[message_type]
        except KeyError:
            pass

    def unregister_all(self):
        """Drop every registered handler."""
        self._registered_handlers = {}

    def connect(self, address, family=socket.AF_INET, retry_interval=1):
        """Connect to ``address``, retrying every ``retry_interval`` seconds
        until the connection succeeds."""
        self.address = address
        self.family = family
        self.retry_interval = retry_interval
        self._connect()

    def _connect(self):
        if self.connected:
            if self.delayed_call:
                # A pending retry fired after we connected: cancel it.
                IOLoop.current().remove_timeout(self.delayed_call)
                self.delayed_call = None
            else:
                raise RuntimeError("Already connected")
        else:
            # FIX: honor the configured retry_interval; previously the retry
            # delay was hard-coded to 1 second.
            self.delayed_call = IOLoop.current().call_later(
                self.retry_interval, self._connect)
            stream_socket = socket.socket(family=self.family)
            self._stream = IOStream(stream_socket)
            self._stream.connect(self.address, self._on_connect)

    def _on_connect(self):
        self.connected = True
        self._stream.set_close_callback(self._on_closed)
        self._stream.read_until('\n', self._handle_greeting)

    def _on_closed(self):
        if self.connected:
            # The other side closed us, try to reconnect
            self.connected = False
            # FIX: preserve the configured retry interval across reconnects
            # instead of silently resetting it to the default.
            self.connect(self.address, self.family, self.retry_interval)

    def _handle_greeting(self, greetings):
        # The first line is a greeting; discard it and start the message loop.
        self._stream.read_until('\n', self._handle_message)

    def _handle_message(self, raw_message):
        message = json.loads(raw_message.strip())
        try:
            handler = self._registered_handlers[message['type']]
            handler(message['content'])
        except KeyError:
            # No handler registered for this type: ignore the message.
            pass
        finally:
            # continue getting messages as long as we are connected
            if self.connected:
                self._stream.read_until('\n', self._handle_message)

    def close(self):
        """Close the connection and stop reconnecting."""
        if self._stream:
            self.connected = False
            self._stream.close()
            self._stream = None
class _HTTPConnection(object):
    """State machine for a single non-blocking HTTP request.

    Parses the request URL, opens an (optionally SSL-wrapped) IOStream to
    the target, writes the request line/headers/body, and reports the
    outcome through ``final_callback``.  ``release_callback`` returns the
    connection slot to the owning client.  Errors escaping any callback
    are funneled through :meth:`cleanup` into a 599 response.
    """

    _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"])

    def __init__(self, io_loop, client, request, release_callback,
                 final_callback, max_buffer_size):
        self.start_time = time.time()
        self.io_loop = io_loop
        self.client = client
        self.request = request
        self.release_callback = release_callback
        self.final_callback = final_callback
        self.code = None
        self.headers = None
        self.chunks = None
        self._decompressor = None
        # Timeout handle returned by IOLoop.add_timeout
        self._timeout = None
        with stack_context.StackContext(self.cleanup):
            parsed = urlparse.urlsplit(_unicode(self.request.url))
            if ssl is None and parsed.scheme == "https":
                raise ValueError("HTTPS requires either python2.6+ or "
                                 "curl_httpclient")
            if parsed.scheme not in ("http", "https"):
                raise ValueError("Unsupported url scheme: %s" %
                                 self.request.url)
            # urlsplit results have hostname and port results, but they
            # didn't support ipv6 literals until python 2.7.
            netloc = parsed.netloc
            if "@" in netloc:
                # Strip userinfo; credentials are re-read from parsed.username
                # later in _on_connect.
                userpass, _, netloc = netloc.rpartition("@")
            match = re.match(r'^(.+):(\d+)$', netloc)
            if match:
                host = match.group(1)
                port = int(match.group(2))
            else:
                host = netloc
                port = 443 if parsed.scheme == "https" else 80
            if re.match(r'^\[.*\]$', host):
                # raw ipv6 addresses in urls are enclosed in brackets
                host = host[1:-1]
            parsed_hostname = host  # save final parsed host for _on_connect
            if self.client.hostname_mapping is not None:
                host = self.client.hostname_mapping.get(host, host)
            if request.allow_ipv6:
                af = socket.AF_UNSPEC
            else:
                # We only try the first IP we get from getaddrinfo,
                # so restrict to ipv4 by default.
                af = socket.AF_INET
            # NOTE: getaddrinfo here is a blocking call on the IOLoop thread.
            addrinfo = socket.getaddrinfo(host, port, af, socket.SOCK_STREAM,
                                          0, 0)
            af, socktype, proto, canonname, sockaddr = addrinfo[0]
            if parsed.scheme == "https":
                ssl_options = {}
                if request.validate_cert:
                    ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
                if request.ca_certs is not None:
                    ssl_options["ca_certs"] = request.ca_certs
                else:
                    ssl_options["ca_certs"] = _DEFAULT_CA_CERTS
                if request.client_key is not None:
                    ssl_options["keyfile"] = request.client_key
                if request.client_cert is not None:
                    ssl_options["certfile"] = request.client_cert
                # SSL interoperability is tricky. We want to disable
                # SSLv2 for security reasons; it wasn't disabled by default
                # until openssl 1.0. The best way to do this is to use
                # the SSL_OP_NO_SSLv2, but that wasn't exposed to python
                # until 3.2. Python 2.7 adds the ciphers argument, which
                # can also be used to disable SSLv2. As a last resort
                # on python 2.6, we set ssl_version to SSLv3. This is
                # more narrow than we'd like since it also breaks
                # compatibility with servers configured for TLSv1 only,
                # but nearly all servers support SSLv3:
                # http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html
                if sys.version_info >= (2, 7):
                    ssl_options["ciphers"] = "DEFAULT:!SSLv2"
                else:
                    # This is really only necessary for pre-1.0 versions
                    # of openssl, but python 2.6 doesn't expose version
                    # information.
                    ssl_options["ssl_version"] = ssl.PROTOCOL_SSLv3
                self.stream = SSLIOStream(socket.socket(af, socktype, proto),
                                          io_loop=self.io_loop,
                                          ssl_options=ssl_options,
                                          max_buffer_size=max_buffer_size)
            else:
                self.stream = IOStream(socket.socket(af, socktype, proto),
                                       io_loop=self.io_loop,
                                       max_buffer_size=max_buffer_size)
            # A single timer covers the connect phase; it is replaced by a
            # request timer in _on_connect.
            timeout = min(request.connect_timeout, request.request_timeout)
            if timeout:
                self._timeout = self.io_loop.add_timeout(
                    self.start_time + timeout,
                    self._on_timeout)
            self.stream.set_close_callback(self._on_close)
            self.stream.connect(sockaddr,
                                functools.partial(self._on_connect, parsed,
                                                  parsed_hostname))

    def _on_timeout(self):
        """Deliver a 599 Timeout response and tear down the stream."""
        self._timeout = None
        self._run_callback(HTTPResponse(self.request, 599,
                                        request_time=time.time() - self.start_time,
                                        error=HTTPError(599, "Timeout")))
        self.stream.close()

    def _on_connect(self, parsed, parsed_hostname):
        """Socket connected: validate the peer, build headers, send the request."""
        if self._timeout is not None:
            # Cancel the connect timer and replace it with the request timer.
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None
        if self.request.request_timeout:
            self._timeout = self.io_loop.add_timeout(
                self.start_time + self.request.request_timeout,
                self._on_timeout)
        if (self.request.validate_cert and
                isinstance(self.stream, SSLIOStream)):
            match_hostname(self.stream.socket.getpeercert(),
                           # ipv6 addresses are broken (in
                           # parsed.hostname) until 2.7, here is
                           # correctly parsed value calculated in
                           # __init__
                           parsed_hostname)
        if (self.request.method not in self._SUPPORTED_METHODS and
                not self.request.allow_nonstandard_methods):
            raise KeyError("unknown method %s" % self.request.method)
        for key in ('network_interface', 'proxy_host', 'proxy_port',
                    'proxy_username', 'proxy_password'):
            if getattr(self.request, key, None):
                raise NotImplementedError('%s not supported' % key)
        if "Connection" not in self.request.headers:
            self.request.headers["Connection"] = "close"
        if "Host" not in self.request.headers:
            if '@' in parsed.netloc:
                # Never leak userinfo credentials into the Host header.
                self.request.headers["Host"] = parsed.netloc.rpartition('@')[-1]
            else:
                self.request.headers["Host"] = parsed.netloc
        username, password = None, None
        if parsed.username is not None:
            # URL-embedded credentials take precedence over request fields.
            username, password = parsed.username, parsed.password
        elif self.request.auth_username is not None:
            username = self.request.auth_username
            password = self.request.auth_password or ''
        if username is not None:
            auth = utf8(username) + b(":") + utf8(password)
            self.request.headers["Authorization"] = (b("Basic ") +
                                                     base64.b64encode(auth))
        if self.request.user_agent:
            self.request.headers["User-Agent"] = self.request.user_agent
        if not self.request.allow_nonstandard_methods:
            # Enforce the usual body/method pairing unless explicitly waived.
            if self.request.method in ("POST", "PATCH", "PUT"):
                assert self.request.body is not None
            else:
                assert self.request.body is None
        if self.request.body is not None:
            self.request.headers["Content-Length"] = str(len(
                self.request.body))
        if (self.request.method == "POST" and
                "Content-Type" not in self.request.headers):
            self.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
        if self.request.use_gzip:
            self.request.headers["Accept-Encoding"] = "gzip"
        req_path = ((parsed.path or '/') +
                    (('?' + parsed.query) if parsed.query else ''))
        request_lines = [utf8("%s %s HTTP/1.1" % (self.request.method,
                                                  req_path))]
        for k, v in self.request.headers.get_all():
            line = utf8(k) + b(": ") + utf8(v)
            if b('\n') in line:
                # Header injection guard.
                raise ValueError('Newline in header: ' + repr(line))
            request_lines.append(line)
        self.stream.write(b("\r\n").join(request_lines) + b("\r\n\r\n"))
        if self.request.body is not None:
            self.stream.write(self.request.body)
        self.stream.read_until_regex(b("\r?\n\r?\n"), self._on_headers)

    def _release(self):
        """Return the connection slot to the client exactly once."""
        if self.release_callback is not None:
            release_callback = self.release_callback
            self.release_callback = None
            release_callback()

    def _run_callback(self, response):
        """Release the slot and invoke the final callback exactly once."""
        self._release()
        if self.final_callback is not None:
            final_callback = self.final_callback
            self.final_callback = None
            final_callback(response)

    @contextlib.contextmanager
    def cleanup(self):
        """StackContext wrapper: convert escaped exceptions into a 599 response."""
        try:
            yield
        except Exception, e:
            #logging.warning("uncaught exception", exc_info=True)
            self._run_callback(HTTPResponse(self.request, 599, error=e,
                                            request_time=time.time() - self.start_time,
                                            ))
            if hasattr(self, "stream"):
                self.stream.close()
class Connection(object):
    """Async connection to a CRLF-delimited protocol server.

    Presumably a Redis connection, given the default port 6379 and the
    ``db``/``pass`` info fields — confirm against the owning client.
    Pending read callbacks are tracked in ``read_callbacks`` so queued
    commands (``ready_callbacks``) only run once all reads complete.
    """

    def __init__(self, host='localhost', port=6379, unix_socket_path=None,
                 event_handler_proxy=None, stop_after=None, io_loop=None):
        self.host = host
        self.port = port
        self.unix_socket_path = unix_socket_path
        self._event_handler = event_handler_proxy
        # stop_after doubles as the blocking-connect socket timeout.
        self.timeout = stop_after
        self._stream = None
        self._io_loop = io_loop
        # NOTE(review): in_progress and _lock are not used anywhere in this
        # class — possibly consumed by callers; verify before removing.
        self.in_progress = False
        self.read_callbacks = set()
        self.ready_callbacks = deque()
        self._lock = 0
        self.info = {'db': 0, 'pass': None}

    def __del__(self):
        self.disconnect()

    def execute_pending_command(self):
        # Continue with the pending command execution
        # if all read operations are completed.
        if not self.read_callbacks and self.ready_callbacks:
            # Pop a SINGLE callback from the queue and execute it.
            # The next one will be executed from the code
            # invoked by the callback
            callback = self.ready_callbacks.popleft()
            callback()

    def ready(self):
        """True when no reads are outstanding and no commands are queued."""
        return (not self.read_callbacks and
                not self.ready_callbacks)

    def wait_until_ready(self, callback=None):
        """Run ``callback`` now if ready, otherwise queue it until then."""
        if callback:
            if not self.ready():
                callback = stack_context.wrap(callback)
                self.ready_callbacks.append(callback)
            else:
                callback()

    def connect(self):
        """Open the socket (blocking) and wrap it in an IOStream; idempotent."""
        if not self._stream:
            try:
                if self.unix_socket_path:
                    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                    sock.settimeout(self.timeout)
                    sock.connect(self.unix_socket_path)
                else:
                    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
                    sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
                    sock.settimeout(self.timeout)
                    sock.connect((self.host, self.port))
                self._stream = IOStream(sock, io_loop=self._io_loop)
                self._stream.set_close_callback(self.on_stream_close)
                # Fresh connection: reset selected-db / auth state.
                self.info['db'] = 0
                self.info['pass'] = None
            except socket.error as e:
                raise ConnectionError(str(e))
            self.fire_event('on_connect')

    def on_stream_close(self):
        """Stream closed by peer: drop state and flush pending read callbacks."""
        if self._stream:
            self.disconnect()
            callbacks = self.read_callbacks
            self.read_callbacks = set()
            for callback in callbacks:
                callback()

    def disconnect(self):
        """Best-effort shutdown and close of the underlying stream."""
        if self._stream:
            s = self._stream
            self._stream = None
            try:
                if s.socket:
                    s.socket.shutdown(socket.SHUT_RDWR)
                s.close()
            except:
                # Deliberate best-effort teardown; the socket may already
                # be dead, and errors here are not actionable.
                pass

    def fire_event(self, event):
        """Invoke the named hook on the event handler, if it defines one."""
        event_handler = self._event_handler
        if event_handler:
            try:
                getattr(event_handler, event)()
            except AttributeError:
                pass

    def write(self, data, callback=None):
        """Write ``data`` to the stream; ``callback(None)`` runs on completion."""
        if not self._stream:
            raise ConnectionError('Tried to write to '
                                  'non-existent connection')
        if callback:
            callback = stack_context.wrap(callback)
            # Wrap so the completion callback matches the (result) signature
            # used by read callbacks, and track it for execute_pending_command.
            _callback = lambda: callback(None)
            self.read_callbacks.add(_callback)
            cb = partial(self.read_callback, _callback)
        else:
            cb = None
        try:
            if PY3:
                data = bytes(data, encoding='utf-8')
            self._stream.write(data, callback=cb)
        except IOError as e:
            self.disconnect()
            raise ConnectionError(e.message)

    def read(self, length, callback=None):
        """Read exactly ``length`` bytes, then invoke ``callback`` with them."""
        try:
            if not self._stream:
                self.disconnect()
                raise ConnectionError('Tried to read from '
                                      'non-existent connection')
            callback = stack_context.wrap(callback)
            self.read_callbacks.add(callback)
            self._stream.read_bytes(length,
                                    callback=partial(self.read_callback,
                                                     callback))
        except IOError:
            self.fire_event('on_disconnect')

    def read_callback(self, callback, *args, **kwargs):
        """Untrack ``callback`` then run it; tolerates double-completion."""
        try:
            self.read_callbacks.remove(callback)
        except KeyError:
            pass
        callback(*args, **kwargs)

    def readline(self, callback=None):
        """Read up to the next CRLF, then invoke ``callback`` with the line."""
        try:
            if not self._stream:
                self.disconnect()
                raise ConnectionError('Tried to read from '
                                      'non-existent connection')
            callback = stack_context.wrap(callback)
            self.read_callbacks.add(callback)
            callback = partial(self.read_callback, callback)
            self._stream.read_until(CRLF, callback=callback)
        except IOError:
            self.fire_event('on_disconnect')

    def connected(self):
        """True while a stream object exists (i.e. not disconnected)."""
        if self._stream:
            return True
        return False
class TornadoClient(Client):
    """Non-blocking Pomelo client driven by the Tornado event loop.

    A handler object supplies optional hooks that are invoked as protocol
    events occur: ``on_connected``, ``on_disconnect``, ``on_recv_data``,
    ``on_heartbeat``, ``on_response`` and ``on_push``.  Typical usage::

        handler = ClientHandler()
        client = TornadoClient(handler)
        client.connect(host, int(port))
        client.run()
        tornado.ioloop.IOLoop.current().start()
    """

    def __init__(self, handler):
        self.socket = socket(AF_INET, SOCK_STREAM)
        self.iostream = None
        self.protocol_package = None
        super(TornadoClient, self).__init__(handler)

    def connect(self, host, port):
        """Wrap the socket in an IOStream and start the async connect."""
        stream = IOStream(self.socket)
        stream.set_close_callback(self.on_close)
        self.iostream = stream
        stream.connect((host, port), self.on_connect)

    def on_connect(self):
        """Connected: perform the sync handshake and begin the read loop."""
        self.send_sync()
        self.on_data()

    def on_close(self):
        """Stream closed: notify the handler if it cares."""
        if hasattr(self.handler, 'on_disconnect'):
            self.handler.on_disconnect(self)

    def send(self, data):
        """Write ``data`` (coerced to bytes) to the open stream."""
        assert not self.iostream.closed(), "iostream has closed"
        payload = data if isinstance(data, bytes) else bytes(data)
        self.iostream.write(payload)

    def on_data(self):
        """Kick off the next read cycle once the current package is done."""
        assert not self.iostream.closed(), "iostream has closed"
        package = self.protocol_package
        if package is None or package.completed():
            # 4-byte protocol header comes first.
            self.iostream.read_bytes(4, self.on_head)

    def on_head(self, head):
        """Header received: decode it and read the advertised body length."""
        package = Protocol.unpack(head)
        self.protocol_package = package
        self.iostream.read_bytes(package.length, self.on_body)

    def on_body(self, body):
        """Body received: filter through the handler, dispatch, and loop."""
        package = self.protocol_package
        if hasattr(self.handler, 'on_recv_data'):
            # The handler may transform the raw body before dispatch.
            body = self.handler.on_recv_data(self, package.proto_type, body)
        package.append(body)
        self.on_protocol(package)
        self.on_data()

    def close(self):
        """Close the underlying stream if one was ever opened."""
        if self.iostream:
            self.iostream.close()
class _HTTPConnection(object):
    """State machine for a single non-blocking HTTP request.

    Parses the request URL, opens an (optionally SSL-wrapped) IOStream,
    writes the request, and reports the outcome via ``final_callback``;
    ``release_callback`` returns the connection slot to the owning client.
    Escaped exceptions are converted to a 599 response by :meth:`cleanup`.
    """

    _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE"])

    def __init__(self, io_loop, client, request, release_callback,
                 final_callback, max_buffer_size):
        self.start_time = time.time()
        self.io_loop = io_loop
        self.client = client
        self.request = request
        self.release_callback = release_callback
        self.final_callback = final_callback
        self.code = None
        self.headers = None
        self.chunks = None
        self._decompressor = None
        # Timeout handle returned by IOLoop.add_timeout
        self._timeout = None
        with stack_context.StackContext(self.cleanup):
            parsed = urlparse.urlsplit(_unicode(self.request.url))
            if ssl is None and parsed.scheme == "https":
                raise ValueError("HTTPS requires either python2.6+ or "
                                 "curl_httpclient")
            if parsed.scheme not in ("http", "https"):
                raise ValueError("Unsupported url scheme: %s" %
                                 self.request.url)
            # urlsplit results have hostname and port results, but they
            # didn't support ipv6 literals until python 2.7.
            netloc = parsed.netloc
            if "@" in netloc:
                # Strip userinfo; credentials are re-read later from
                # parsed.username in _on_connect.
                userpass, _, netloc = netloc.rpartition("@")
            match = re.match(r'^(.+):(\d+)$', netloc)
            if match:
                host = match.group(1)
                port = int(match.group(2))
            else:
                host = netloc
                port = 443 if parsed.scheme == "https" else 80
            if re.match(r'^\[.*\]$', host):
                # raw ipv6 addresses in urls are enclosed in brackets
                host = host[1:-1]
            if self.client.hostname_mapping is not None:
                host = self.client.hostname_mapping.get(host, host)
            if request.allow_ipv6:
                af = socket.AF_UNSPEC
            else:
                # We only try the first IP we get from getaddrinfo,
                # so restrict to ipv4 by default.
                af = socket.AF_INET
            # NOTE: getaddrinfo here is a blocking call on the IOLoop thread.
            addrinfo = socket.getaddrinfo(host, port, af, socket.SOCK_STREAM,
                                          0, 0)
            af, socktype, proto, canonname, sockaddr = addrinfo[0]
            if parsed.scheme == "https":
                ssl_options = {}
                if request.validate_cert:
                    ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
                if request.ca_certs is not None:
                    ssl_options["ca_certs"] = request.ca_certs
                else:
                    ssl_options["ca_certs"] = _DEFAULT_CA_CERTS
                if request.client_key is not None:
                    ssl_options["keyfile"] = request.client_key
                if request.client_cert is not None:
                    ssl_options["certfile"] = request.client_cert
                self.stream = SSLIOStream(socket.socket(af, socktype, proto),
                                          io_loop=self.io_loop,
                                          ssl_options=ssl_options,
                                          max_buffer_size=max_buffer_size)
            else:
                self.stream = IOStream(socket.socket(af, socktype, proto),
                                       io_loop=self.io_loop,
                                       max_buffer_size=max_buffer_size)
            # A single timer covers the connect phase; _on_connect replaces
            # it with the request timer.
            timeout = min(request.connect_timeout, request.request_timeout)
            if timeout:
                self._timeout = self.io_loop.add_timeout(
                    self.start_time + timeout,
                    self._on_timeout)
            self.stream.set_close_callback(self._on_close)
            self.stream.connect(sockaddr,
                                functools.partial(self._on_connect, parsed))

    def _on_timeout(self):
        """Deliver a 599 Timeout response and tear down the stream."""
        self._timeout = None
        self._run_callback(HTTPResponse(self.request, 599,
                                        request_time=time.time() - self.start_time,
                                        error=HTTPError(599, "Timeout")))
        self.stream.close()

    def _on_connect(self, parsed):
        """Socket connected: validate the peer, build headers, send the request."""
        if self._timeout is not None:
            # Cancel the connect timer and replace it with the request timer.
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None
        if self.request.request_timeout:
            self._timeout = self.io_loop.add_timeout(
                self.start_time + self.request.request_timeout,
                self._on_timeout)
        if (self.request.validate_cert and
                isinstance(self.stream, SSLIOStream)):
            match_hostname(self.stream.socket.getpeercert(),
                           parsed.hostname)
        if (self.request.method not in self._SUPPORTED_METHODS and
                not self.request.allow_nonstandard_methods):
            raise KeyError("unknown method %s" % self.request.method)
        for key in ('network_interface', 'proxy_host', 'proxy_port',
                    'proxy_username', 'proxy_password'):
            if getattr(self.request, key, None):
                raise NotImplementedError('%s not supported' % key)
        if "Host" not in self.request.headers:
            self.request.headers["Host"] = parsed.netloc
        username, password = None, None
        if parsed.username is not None:
            # URL-embedded credentials take precedence over request fields.
            username, password = parsed.username, parsed.password
        elif self.request.auth_username is not None:
            username = self.request.auth_username
            password = self.request.auth_password
        if username is not None:
            auth = utf8(username) + b(":") + utf8(password)
            self.request.headers["Authorization"] = (b("Basic ") +
                                                     base64.b64encode(auth))
        if self.request.user_agent:
            self.request.headers["User-Agent"] = self.request.user_agent
        if not self.request.allow_nonstandard_methods:
            # Enforce the usual body/method pairing unless explicitly waived.
            if self.request.method in ("POST", "PUT"):
                assert self.request.body is not None
            else:
                assert self.request.body is None
        if self.request.body is not None:
            self.request.headers["Content-Length"] = str(len(
                self.request.body))
        if (self.request.method == "POST" and
                "Content-Type" not in self.request.headers):
            self.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
        if self.request.use_gzip:
            self.request.headers["Accept-Encoding"] = "gzip"
        req_path = ((parsed.path or '/') +
                    (('?' + parsed.query) if parsed.query else ''))
        request_lines = [utf8("%s %s HTTP/1.1" % (self.request.method,
                                                  req_path))]
        for k, v in self.request.headers.get_all():
            line = utf8(k) + b(": ") + utf8(v)
            if b('\n') in line:
                # Header injection guard.
                raise ValueError('Newline in header: ' + repr(line))
            request_lines.append(line)
        self.stream.write(b("\r\n").join(request_lines) + b("\r\n\r\n"))
        if self.request.body is not None:
            self.stream.write(self.request.body)
        self.stream.read_until_regex(b("\r?\n\r?\n"), self._on_headers)

    def _release(self):
        """Return the connection slot to the client exactly once."""
        if self.release_callback is not None:
            release_callback = self.release_callback
            self.release_callback = None
            release_callback()

    def _run_callback(self, response):
        """Release the slot and invoke the final callback exactly once."""
        self._release()
        if self.final_callback is not None:
            final_callback = self.final_callback
            self.final_callback = None
            final_callback(response)

    @contextlib.contextmanager
    def cleanup(self):
        """StackContext wrapper: convert escaped exceptions into a 599 response."""
        try:
            yield
        except Exception, e:
            logging.warning("uncaught exception", exc_info=True)
            self._run_callback(HTTPResponse(self.request, 599, error=e,
                                            request_time=time.time() - self.start_time,
                                            ))
class _HTTPConnection(object): _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE"]) def __init__(self, io_loop, client, request, callback): self.start_time = time.time() self.io_loop = io_loop self.client = client self.request = request self.callback = callback self.code = None self.headers = None self.chunks = None self._decompressor = None # Timeout handle returned by IOLoop.add_timeout self._timeout = None with stack_context.StackContext(self.cleanup): parsed = urlparse.urlsplit(self.request.url) host = parsed.hostname if parsed.port is None: port = 443 if parsed.scheme == "https" else 80 else: port = parsed.port if self.client.hostname_mapping is not None: host = self.client.hostname_mapping.get(host, host) if parsed.scheme == "https": ssl_options = {} if request.validate_cert: ssl_options["cert_reqs"] = ssl.CERT_REQUIRED if request.ca_certs is not None: ssl_options["ca_certs"] = request.ca_certs else: ssl_options["ca_certs"] = _DEFAULT_CA_CERTS self.stream = SSLIOStream(socket.socket(), io_loop=self.io_loop, ssl_options=ssl_options) else: self.stream = IOStream(socket.socket(), io_loop=self.io_loop) timeout = min(request.connect_timeout, request.request_timeout) if timeout: self._connect_timeout = self.io_loop.add_timeout( self.start_time + timeout, self._on_timeout) self.stream.set_close_callback(self._on_close) self.stream.connect((host, port), functools.partial(self._on_connect, parsed)) def _on_timeout(self): self._timeout = None if self.callback is not None: self.callback( HTTPResponse(self.request, 599, error=HTTPError(599, "Timeout"))) self.callback = None self.stream.close() def _on_connect(self, parsed): if self._timeout is not None: self.io_loop.remove_callback(self._timeout) self._timeout = None if self.request.request_timeout: self._timeout = self.io_loop.add_timeout( self.start_time + self.request.request_timeout, self._on_timeout) if (self.request.validate_cert and isinstance(self.stream, SSLIOStream)): 
match_hostname(self.stream.socket.getpeercert(), parsed.hostname) if (self.request.method not in self._SUPPORTED_METHODS and not self.request.allow_nonstandard_methods): raise KeyError("unknown method %s" % self.request.method) for key in ('network_interface', 'proxy_host', 'proxy_port', 'proxy_username', 'proxy_password'): if getattr(self.request, key, None): raise NotImplementedError('%s not supported' % key) if "Host" not in self.request.headers: self.request.headers["Host"] = parsed.netloc username, password = None, None if parsed.username is not None: username, password = parsed.username, parsed.password elif self.request.auth_username is not None: username = self.request.auth_username password = self.request.auth_password if username is not None: auth = "%s:%s" % (username, password) self.request.headers["Authorization"] = ("Basic %s" % base64.b64encode(auth)) if self.request.user_agent: self.request.headers["User-Agent"] = self.request.user_agent has_body = self.request.method in ("POST", "PUT") if has_body: assert self.request.body is not None self.request.headers["Content-Length"] = len(self.request.body) else: assert self.request.body is None if (self.request.method == "POST" and "Content-Type" not in self.request.headers): self.request.headers[ "Content-Type"] = "application/x-www-form-urlencoded" if self.request.use_gzip: self.request.headers["Accept-Encoding"] = "gzip" req_path = ((parsed.path or '/') + (('?' 
+ parsed.query) if parsed.query else '')) request_lines = ["%s %s HTTP/1.1" % (self.request.method, req_path)] for k, v in self.request.headers.get_all(): line = "%s: %s" % (k, v) if '\n' in line: raise ValueError('Newline in header: ' + repr(line)) request_lines.append(line) self.stream.write("\r\n".join(request_lines) + "\r\n\r\n") if has_body: self.stream.write(self.request.body) self.stream.read_until("\r\n\r\n", self._on_headers) @contextlib.contextmanager def cleanup(self): try: yield except Exception, e: logging.warning("uncaught exception", exc_info=True) if self.callback is not None: callback = self.callback self.callback = None callback(HTTPResponse(self.request, 599, error=e))