class ForwardConnection(object):
    """Bidirectional TCP forwarder: pipes bytes between an accepted client
    ``stream`` and a fresh connection to ``remote_address``."""

    def __init__(self, remote_address, stream, address):
        self.remote_address = remote_address
        self.stream = stream
        self.address = address
        sock = socket.socket()
        self.remote_stream = IOStream(sock)
        self.remote_stream.connect(self.remote_address, self._on_remote_connected)

    def _on_remote_connected(self):
        # Once connected, stream data in both directions until either side
        # closes; each side's bytes are written straight to the other.
        logging.info("forward %r to %r", self.address, self.remote_address)
        self.remote_stream.read_until_close(self._on_remote_read_close,
                                            self.stream.write)
        self.stream.read_until_close(self._on_read_close,
                                     self.remote_stream.write)

    def _on_remote_read_close(self, data):
        # Remote side closed: flush any trailing bytes to the client, then
        # close. If a write is in flight, defer the close until it completes.
        if self.stream.writing():
            self.stream.write(data, self.stream.close)
        else:
            self.stream.close()

    def _on_read_close(self, data):
        # Mirror of _on_remote_read_close for the client->remote direction.
        if self.remote_stream.writing():
            self.remote_stream.write(data, self.remote_stream.close)
        else:
            self.remote_stream.close()
class HTTPServerRawTest(AsyncHTTPTestCase):
    """Tests that drive the HTTP server with raw bytes over an IOStream."""

    def get_app(self):
        return Application([("/echo", EchoHandler)])

    def setUp(self):
        super(HTTPServerRawTest, self).setUp()
        self.stream = IOStream(socket.socket())
        self.stream.connect(("localhost", self.get_http_port()), self.stop)
        self.wait()

    def tearDown(self):
        self.stream.close()
        super(HTTPServerRawTest, self).tearDown()

    def test_empty_request(self):
        # Closing without sending anything must not crash the server; the
        # short timeout just gives the server side a chance to run.
        self.stream.close()
        self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
        self.wait()

    def test_malformed_first_line(self):
        with ExpectLog(gen_log, ".*Malformed HTTP request line"):
            self.stream.write(b"asdf\r\n\r\n")
            # TODO: need an async version of ExpectLog so we don't need
            # hard-coded timeouts here.
            self.io_loop.add_timeout(datetime.timedelta(seconds=0.01),
                                     self.stop)
            self.wait()

    def test_malformed_headers(self):
        with ExpectLog(gen_log, ".*Malformed HTTP headers"):
            self.stream.write(b"GET / HTTP/1.0\r\nasdf\r\n\r\n")
            self.io_loop.add_timeout(datetime.timedelta(seconds=0.01),
                                     self.stop)
            self.wait()
class ManualCapClient(BaseCapClient):
    """Capitalization client built from explicit IOStream callbacks plus a
    manually managed Future (supports both Future- and callback-style use)."""

    def capitalize(self, request_data, callback=None):
        logging.debug("capitalize")
        self.request_data = request_data
        self.stream = IOStream(socket.socket())
        self.stream.connect(('127.0.0.1', self.port),
                            callback=self.handle_connect)
        self.future = Future()
        if callback is not None:
            # Bridge the Future back to callback-style callers.
            self.future.add_done_callback(
                stack_context.wrap(lambda future: callback(future.result())))
        return self.future

    def handle_connect(self):
        logging.debug("handle_connect")
        self.stream.write(utf8(self.request_data + "\n"))
        self.stream.read_until(b'\n', callback=self.handle_read)

    def handle_read(self, data):
        logging.debug("handle_read")
        self.stream.close()
        try:
            self.future.set_result(self.process_response(data))
        except CapError as e:
            # process_response raised: surface the error through the Future.
            self.future.set_exception(e)
def test_gaierror(self):
    # Test that IOStream sets its exc_info on getaddrinfo error
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    stream = IOStream(s, io_loop=self.io_loop)
    stream.set_close_callback(self.stop)
    # NOTE(review): assumes the bogus TLD fails resolution synchronously
    # before the assertion runs, and that the resolver actually errors
    # (some ISPs/opendns return bogus addresses instead) -- flaky; later
    # versions of this test mock socket.connect instead.
    stream.connect(('adomainthatdoesntexist.asdf', 54321))
    self.assertTrue(isinstance(stream.error, socket.gaierror), stream.error)
class ForwardConnection(object):
    """One-shot HTTP forwarder: connects to ``remote_address``, sends the
    captured ``headers`` bytes, then copies the entire response back to the
    client ``stream`` and closes it."""

    def __init__(self, remote_address, stream, address, headers):
        self.remote_address = remote_address
        self.stream = stream
        self.address = address
        self.headers = headers
        sock = socket.socket()
        self.remote_stream = IOStream(sock)
        self.remote_stream.connect(self.remote_address, self._on_remote_connected)
        self.remote_stream.set_close_callback(self._on_close)

    def _on_remote_write_complete(self):
        logging.info('send request to %s', self.remote_address)
        # Read the whole response; delivered to _on_remote_read_close at EOF.
        self.remote_stream.read_until_close(self._on_remote_read_close)

    def _on_remote_connected(self):
        logging.info('forward %r to %r', self.address, self.remote_address)
        self.remote_stream.write(self.headers, self._on_remote_write_complete)

    def _on_remote_read_close(self, data):
        # Relay the response, then close the client side once the write lands.
        self.stream.write(data, self.stream.close)

    def _on_close(self):
        logging.info('remote quit %s', self.remote_address)
        self.remote_stream.close()
class IRCStream(object):
    """ A connection to an IRC server utilizing IOStream """

    def __init__(self, nick, url, io_loop=None):
        self.nick = nick
        self.url = url
        self.io_loop = io_loop or IOLoop.instance()
        parsed = urlparse.urlsplit(self.url)
        assert parsed.scheme == 'irc'
        # Split an explicit "host:port"; otherwise use the standard IRC port.
        if ':' in parsed.netloc:
            host, _, port = parsed.netloc.partition(':')
            port = int(port)
        else:
            host = parsed.netloc
            port = 6667
        self.host = host
        self.port = port

    def connect(self, callback):
        """Open the TCP connection; ``callback(True)`` fires after NICK is sent."""
        self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
        self.stream.connect((self.host, self.port),
                            functools.partial(self._on_connect, callback))

    def _on_connect(self, callback):
        # NOTE(review): writes a str, not bytes -- Python 2 / old Tornado only.
        self.stream.write('NICK %s\r\n' % self.nick)
        callback(True)
def test_empty_request(self):
    """An immediate close after connect must not break the server."""
    stream = IOStream(socket.socket(), io_loop=self.io_loop)
    stream.connect(("localhost", self.get_http_port()), self.stop)
    self.wait()
    stream.close()
    # Brief timeout gives the server a chance to process the disconnect.
    self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
    self.wait()
class _UDPConnection(object):
    """Single request/response exchange over a connected datagram socket,
    driven by Tornado's IOStream."""

    def __init__(self, io_loop, client, request, release_callback,
                 final_callback, max_buffer_size):
        self.start_time = time.time()
        self.io_loop = io_loop
        self.client = client
        self.request = request
        self.release_callback = release_callback
        self.final_callback = final_callback
        address_info = socket.getaddrinfo(request.address, request.port,
                                          socket.AF_INET, socket.SOCK_DGRAM,
                                          0, 0)
        af, socket_type, proto, _, socket_address = address_info[0]
        self.stream = IOStream(socket.socket(af, socket_type, proto),
                               io_loop=self.io_loop,
                               max_buffer_size=max_buffer_size)
        self.stream.connect(socket_address, self._on_connect)

    def _on_connect(self):
        self.stream.write(self.request.data)
        # Responses are assumed to end with '}}' (JSON-like payload).
        # NOTE(review): newer Tornado requires a bytes delimiter -- confirm
        # the targeted Tornado version accepts str here.
        self.stream.read_until('}}', self._on_response)

    def _on_response(self, data):
        # Release the connection slot before invoking the final callback so
        # the client can reuse it even if the callback raises. Both
        # callbacks are cleared first to guarantee they run at most once.
        if self.release_callback is not None:
            release_callback = self.release_callback
            self.release_callback = None
            release_callback()
        self.stream.close()
        if self.final_callback is not None:
            final_callback = self.final_callback
            self.final_callback = None
            final_callback(data)
def test_connection_refused(self):
    # When a connection is refused, the connect callback should not
    # be run. (The kqueue IOLoop used to behave differently from the
    # epoll IOLoop in this respect)
    cleanup_func, port = refusing_port()
    self.addCleanup(cleanup_func)
    stream = IOStream(socket.socket(), self.io_loop)
    self.connect_called = False

    def connect_callback():
        self.connect_called = True
        self.stop()

    stream.set_close_callback(self.stop)
    # log messages vary by platform and ioloop implementation
    with ExpectLog(gen_log, ".*", required=False):
        stream.connect(("127.0.0.1", port), connect_callback)
        self.wait()
    self.assertFalse(self.connect_called)
    self.assertTrue(isinstance(stream.error, socket.error), stream.error)
    if sys.platform != "cygwin":
        _ERRNO_CONNREFUSED = (errno.ECONNREFUSED,)
        if hasattr(errno, "WSAECONNREFUSED"):
            _ERRNO_CONNREFUSED += (errno.WSAECONNREFUSED,)
        # cygwin's errnos don't match those used on native windows python
        self.assertTrue(stream.error.args[0] in _ERRNO_CONNREFUSED)
def test_100_continue(self):
    # Run through a 100-continue interaction by hand:
    # When given Expect: 100-continue, we get a 100 response after the
    # headers, and then the real response after the body.
    stream = IOStream(socket.socket())
    stream.connect(("127.0.0.1", self.get_http_port()), callback=self.stop)
    self.wait()
    stream.write(b"\r\n".join([b"POST /hello HTTP/1.1",
                               b"Content-Length: 1024",
                               b"Expect: 100-continue",
                               b"Connection: close",
                               b"\r\n"]), callback=self.stop)
    self.wait()
    stream.read_until(b"\r\n\r\n", self.stop)
    data = self.wait()
    self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
    # Now send the promised body; the real response follows it.
    stream.write(b"a" * 1024)
    stream.read_until(b"\r\n", self.stop)
    first_line = self.wait()
    self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
    stream.read_until(b"\r\n\r\n", self.stop)
    header_data = self.wait()
    headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
    stream.read_bytes(int(headers["Content-Length"]), self.stop)
    body = self.wait()
    self.assertEqual(body, b"Got 1024 bytes in POST")
    stream.close()
def connect(self):
    """Open a TCP stream to (self.host, self.port) and wrap it in a new
    connection object, which is returned to the caller."""
    raw_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    io_stream = IOStream(raw_socket)
    io_stream.connect((self.host, self.port))
    local_name = raw_socket.getsockname()
    return self.connection(io_stream, local_name, self.data, self.terminator)
class TCPClient(object):
    """Minimal TCP client that wraps a socket in a Tornado IOStream.

    Fixes over the previous version:
    - removed the duplicated ``self.io_loop = self.io_loop = ...`` assignment;
    - ``self.shutdown`` is now initialized in ``__init__`` (it was commented
      out, so ``on_close()`` raised AttributeError if the stream closed
      before ``set_shutdown()`` was ever called).
    """

    def __init__(self, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        # Must exist before any close event: on_close() reads it.
        self.shutdown = False
        self.sock_fd = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        self.sock_fd.settimeout(0.5)
        self.stream = IOStream(self.sock_fd)

    def connect(self, host, port):
        """Begin connecting and return the stream (the connection may still
        be in progress when this returns)."""
        self.stream.connect((host, port))
        return self.stream

    @return_future
    def connect_server(self, host, port, callback=None):
        """Future-returning connect via the @return_future adapter."""
        self.stream.connect((host, port), callback=callback)

    def on_close(self):
        # Stop the loop only when the close was requested via set_shutdown().
        if self.shutdown:
            self.io_loop.stop()

    def set_shutdown(self):
        """Mark the client as shutting down so on_close() stops the loop."""
        self.shutdown = True
def execute(self, cmd):
    """ Executes `cmd` on host and returns results

    Creates socket and tries to execute command against zookeeper.
    Socket is limited by quasi-Tornado's timeout.
    It doesn't check validity of response.

    Note:
        Timeout should be implemented using tornado.concurrent.chain_future:
        https://github.com/tornadoweb/tornado/blob/master/tornado/concurrent.py#L316
        such a wrapper exists in Tornado 4.0+ - with_timeout
        https://github.com/tornadoweb/tornado/blob/master/tornado/gen.py#L507

    Args:
        cmd: Four-letter string containing command to execute

    Returns:
        Raw response - bytes.

    Raises:
        HostConnectionTimeout: If the combined time of connection, request
            and response exceeds the timeout
        Socket errors: like ECONNREFUSED, ...
    """
    ioloop = IOLoop.current()
    address_family, addr = yield self._resolve(ioloop)
    stream = IOStream(socket.socket(address_family), io_loop=ioloop)
    stream.connect(addr)
    # Zookeeper four-letter commands are newline-terminated.
    cmd = '{}\n'.format(cmd.strip())
    yield gen.Task(stream.write, cmd.encode('utf-8'))
    data = yield gen.Task(stream.read_until_close)
    raise gen.Return(data)
def initiate(cls, host, port, infohash):
    """Resolve ``host``/``port`` (IPv4 only) and start an outgoing peer
    connection for ``infohash``; ``initiate_connected`` fires once the TCP
    connect completes."""
    af = socket.AF_INET
    addrinfo = socket.getaddrinfo(host, port, af, socket.SOCK_STREAM, 0, 0)
    af, socktype, proto, canonname, sockaddr = addrinfo[0]
    stream = IOStream(socket.socket(af, socktype, proto), io_loop=cls.io_loop)
    stream.connect(sockaddr,
                   functools.partial(cls.initiate_connected, stream,
                                     sockaddr, infohash))
def connect(self):
    """Connect to the local MJPEG server port and register this client in
    the class-wide registry keyed by camera id."""
    # self is an IOStream subclass; call the base implementation unbound.
    IOStream.connect(self, ("localhost", self._port), self._on_connect)
    MjpgClient.clients[self._camera_id] = self
    logging.debug(
        "mjpg client for camera %(camera_id)s connecting on port %(port)s..."
        % {"port": self._port, "camera_id": self._camera_id}
    )
class IRCStream(object):
    """Singleton IRC bot connection that relays channel PRIVMSGs to
    broadcast_message().

    NOTE(review): Python 2 code (``print`` statement, str-based stream I/O).
    """

    _instance = None

    @classmethod
    def instance(cls):
        """ Returns the singleton """
        if not cls._instance:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        self.stream = IOStream(sock)
        self.host = SETTINGS["irc_host"]
        self.channel = SETTINGS["irc_channel"]
        self.stream.connect((self.host, SETTINGS["irc_port"]))
        self.nick = "PyTexasBot"
        self.ident = "pytexasbot"
        self.real_name = "PyTexas StreamBot"
        # Standard IRC handshake, then join the configured channel.
        self.stream.write("NICK %s\r\n" % self.nick)
        self.stream.write("USER %s %s blah :%s\r\n" %
                          (self.ident, self.host, self.real_name))
        self.stream.write("JOIN #" + self.channel + "\r\n")
        self.monitor_output()

    def monitor_output(self):
        # Arm a read for the next CRLF-terminated IRC line.
        self.stream.read_until("\r\n", self.parse_line)

    def parse_line(self, response):
        """Handle one IRC line: answer PINGs, relay channel messages, raise
        on server ERROR, then re-arm the read."""
        response = response.strip()
        if response.startswith("PING "):
            request = response.replace("PING ", "")
            self.stream.write("PONG %s\r\n" % request)
        splitter = "PRIVMSG #%s :" % self.channel
        if splitter in response:
            parts = response.split(splitter)
            text = parts[1]
            if not text:
                # Empty message: skip it, but keep reading.
                return self.monitor_output()
            # Sender nick is the leading ":nick!user@host" prefix.
            nick = parts[0][1:].split("!")[0].strip()
            message = {
                "time": int(time.time()),
                "text": xhtml_escape(text),
                "name": nick,
                "username": "******",
                "type": "tweet",
                "avatar": None
            }
            broadcast_message(message)
        if response.startswith("ERROR"):
            raise Exception(response)
        else:
            print response
        self.monitor_output()
def _maybe_connect(self, to_pid, callback=None):
    """Asynchronously establish a connection to the remote pid.

    If a stream already exists, the callback is invoked with it; otherwise
    the callback is queued and only the first waiter creates the stream.
    """
    callback = stack_context.wrap(callback or (lambda stream: None))

    def streaming_callback(data):
        # we are not guaranteed to get an acknowledgment, but log and discard bytes if we do.
        log.info('Received %d bytes from %s, discarding.' % (len(data), to_pid))
        log.debug(' data: %r' % (data,))

    def on_connect(exit_cb, stream):
        log.info('Connection to %s established' % to_pid)
        with self._connection_callbacks_lock:
            self._connections[to_pid] = stream
        self.__dispatch_on_connect_callbacks(to_pid, stream)
        self.__loop.add_callback(
            stream.read_until_close, exit_cb,
            streaming_callback=streaming_callback)

    create = False
    with self._connection_callbacks_lock:
        stream = self._connections.get(to_pid)
        callbacks = self._connection_callbacks.get(to_pid)
        if not stream:
            # Queue our callback; if no callbacks were queued before us, we
            # are responsible for creating the stream.
            self._connection_callbacks[to_pid].append(callback)
            if not callbacks:
                create = True
    if stream:
        # Already connected: dispatch on the loop to keep ordering.
        self.__loop.add_callback(callback, stream)
        return
    if not create:
        # Another caller is already establishing the connection.
        return
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    if not sock:
        raise self.SocketError('Failed opening socket')
    stream = IOStream(sock, io_loop=self.__loop)
    stream.set_nodelay(True)
    stream.set_close_callback(partial(self.__on_exit, to_pid,
                                      b'reached end of stream'))
    connect_callback = partial(on_connect, partial(self.__on_exit, to_pid),
                               stream)
    log.info('Establishing connection to %s' % to_pid)
    stream.connect((to_pid.ip, to_pid.port), callback=connect_callback)
    if stream.closed():
        raise self.SocketError('Failed to initiate stream connection')
    log.info('Maybe connected to %s' % to_pid)
def new_stream(ip, port, callback=None):
    """Create a TCP IOStream, start connecting to (ip, port), and return it.

    Intended for long-term connections; for async connection handling see
    `async_stream_task`.
    """
    # TODO: handle exception on IOStream.connect()
    tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    io_stream = IOStream(tcp_socket)
    io_stream.connect((ip, port), callback=callback)
    return io_stream
class HTTPServerRawTest(AsyncHTTPTestCase):
    """Raw-socket HTTP server tests, including a hand-built chunked body."""

    def get_app(self):
        return Application([
            ('/echo', EchoHandler),
        ])

    def setUp(self):
        super(HTTPServerRawTest, self).setUp()
        self.stream = IOStream(socket.socket())
        self.stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
        self.wait()

    def tearDown(self):
        self.stream.close()
        super(HTTPServerRawTest, self).tearDown()

    def test_empty_request(self):
        # Closing without sending anything must not crash the server.
        self.stream.close()
        self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
        self.wait()

    def test_malformed_first_line(self):
        with ExpectLog(gen_log, '.*Malformed HTTP request line'):
            self.stream.write(b'asdf\r\n\r\n')
            # TODO: need an async version of ExpectLog so we don't need
            # hard-coded timeouts here.
            self.io_loop.add_timeout(datetime.timedelta(seconds=0.01),
                                     self.stop)
            self.wait()

    def test_malformed_headers(self):
        with ExpectLog(gen_log, '.*Malformed HTTP headers'):
            self.stream.write(b'GET / HTTP/1.0\r\nasdf\r\n\r\n')
            self.io_loop.add_timeout(datetime.timedelta(seconds=0.01),
                                     self.stop)
            self.wait()

    def test_chunked_request_body(self):
        # Chunked requests are not widely supported and we don't have a way
        # to generate them in AsyncHTTPClient, but HTTPServer will read them.
        self.stream.write(b"""\
POST /echo HTTP/1.1
Transfer-Encoding: chunked
Content-Type: application/x-www-form-urlencoded

4
foo=
3
bar
0

""".replace(b"\n", b"\r\n"))
        read_stream_body(self.stream, self.stop)
        headers, response = self.wait()
        self.assertEqual(json_decode(response), {u('foo'): [u('bar')]})
def _connect(self):
    """Open the MySQL transport (UNIX socket or TCP), wrap it in an
    IOStream, and complete the handshake/authentication.

    Runs inside a child greenlet: the IOStream connect callback switches
    back into this greenlet once the stream is established.

    Raises:
        err.OperationalError: (2003) if any step of connecting fails.
    """
    try:
        if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.host_info = "Localhost via UNIX socket"
            address = self.unix_socket
        else:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
            self.host_info = "socket %s:%d" % (self.host, self.port)
            address = (self.host, self.port)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        if self.no_delay:
            sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        sock = IOStream(sock)
        child_gr = greenlet.getcurrent()
        main = child_gr.parent
        assert main is not None, "Execute must be running in child greenlet"
        if self.connect_timeout:
            def timeout():
                # NOTE(review): raising in an IOLoop callback cannot reach
                # this greenlet; it only surfaces in the loop's error
                # handler -- confirm this is the intended behavior.
                if not self.socket:
                    raise Exception("connection timeout")
            IOLoop.current().add_timeout(
                time.time() + self.connect_timeout, timeout)

        def connected():
            def close_callback():
                self.close()
            sock.set_close_callback(close_callback)
            self.socket = sock
            # Resume _connect() below main.switch().
            child_gr.switch()

        sock.connect(address, connected)
        main.switch()
        self._rfile = self.socket
        self._get_server_information()
        self._request_authentication()
        if self.sql_mode is not None:
            c = self.cursor()
            c.execute("SET sql_mode=%s", (self.sql_mode,))
        if self.init_command is not None:
            c = self.cursor()
            c.execute(self.init_command)
            self.commit()
        if self.autocommit_mode is not None:
            self.autocommit(self.autocommit_mode)
    except Exception as e:
        self._rfile = None
        # Fix: self.socket is only assigned inside connected(); if the
        # failure happened before that point, the unguarded
        # self.socket.close() raised AttributeError and masked the
        # original connection error.
        if getattr(self, 'socket', None) is not None:
            self.socket.close()
        self.socket = None
        raise err.OperationalError(
            2003,
            "Can't connect to MySQL server on %r (%s)" % (self.host, e))
def initiate(cls, host, port, infohash):
    """Open an outgoing peer connection for ``infohash`` and return the
    connection object immediately; the TCP connect completes asynchronously
    and then triggers ``initiate_connected``."""
    logging.info('initiating connection with %s,%s with hash %s' %
                 (host, port, [infohash]))
    af = socket.AF_INET
    addrinfo = socket.getaddrinfo(host, port, af, socket.SOCK_STREAM, 0, 0)
    af, socktype, proto, canonname, sockaddr = addrinfo[0]
    stream = IOStream(socket.socket(af, socktype, proto), io_loop=cls.ioloop)
    conn = cls(stream, sockaddr, cls.application, self_initiated=True)
    stream.connect(
        sockaddr,
        functools.partial(cls.initiate_connected, conn, infohash)
    )
    return conn
def connect(self):
    """Connects to the remote host and triggers the `connect_cb` callback
    that was provided to the constructor.
    """
    # Drop any existing connection before opening a new one.
    if self.is_connected:
        self.close()
    io_stream = IOStream(socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0))
    io_stream.connect((self.host, self.port), self.on_connect)
    self.pipe = Pipe(io_stream, *self.pipe_args)
def test_gaierror(self):
    # Test that IOStream sets its exc_info on getaddrinfo error
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    stream = IOStream(s, io_loop=self.io_loop)
    stream.set_close_callback(self.stop)
    # To reliably generate a gaierror we use a malformed domain name
    # instead of a name that's simply unlikely to exist (since
    # opendns and some ISPs return bogus addresses for nonexistent
    # domains instead of the proper error codes).
    stream.connect(('an invalid domain', 54321))
    self.assertTrue(isinstance(stream.error, socket.gaierror), stream.error)
def get_stream(self):
    """Return an IOStream connected (asynchronously) to the local VNC server.

    The close callback and connect callback are wired to self.on_close and
    self.connected_to_vnc respectively.
    """
    # Imports kept local to defer dependency loading until first use.
    # (Removed `import commands`: a Python-2-only module that was never
    # referenced in this function.)
    import socket
    from tornado.iostream import IOStream
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    stream = IOStream(sock)
    stream.set_close_callback(self.on_close)
    stream.connect(
        ('127.0.0.1', self.get_vnc_port()), self.connected_to_vnc)
    return stream
def test_gaierror(self):
    # Test that IOStream sets its exc_info on getaddrinfo error
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    stream = IOStream(s, io_loop=self.io_loop)
    stream.set_close_callback(self.stop)
    # To reliably generate a gaierror we use a malformed domain name
    # instead of a name that's simply unlikely to exist (since
    # opendns and some ISPs return bogus addresses for nonexistent
    # domains instead of the proper error codes).
    with ExpectLog(gen_log, "Connect error"):
        stream.connect(('an invalid domain', 54321))
        self.assertTrue(isinstance(stream.error, socket.gaierror),
                        stream.error)
class Flash(object):
    """Client for a flash-gateway TCP protocol: reads length-prefixed
    command frames and dispatches them through CommandsRegistry."""

    def __init__(self, close_callback=None):
        self._iostream = None
        self._close_callback = close_callback

    def connect(self, host='127.0.0.1', port=9999):
        """Connect to the gateway and start listening for commands."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._iostream = IOStream(sock)
        self._iostream.set_close_callback(self._on_connection_close)
        self._iostream.connect((host, port), self._read_head)

    def close(self, *args):
        """Close the connection.

        Fix: accepts (and ignores) extra positional arguments so it can be
        installed directly as a signal handler -- signal handlers are
        invoked as handler(signum, frame), which previously raised
        TypeError on Ctrl-C.
        """
        self._on_connection_close()

    def _on_connection_close(self):
        self._iostream.close()
        if self._close_callback:
            self._close_callback()

    def _read_head(self):
        # Each frame starts with a fixed-size header: (type, payload length).
        self._iostream.read_bytes(BaseCommand.meta_size, self._on_read_head)

    def _on_read_head(self, data):
        ctype, length = struct.unpack(">BH", data)
        if length:
            # Read the payload, then dispatch with its bytes.
            self._iostream.read_bytes(length,
                                      partial(self.execute_command, ctype))
        else:
            self.execute_command(ctype)

    def execute_command(self, ctype, value=None):
        """Dispatch one decoded command, then re-arm the header read.
        Unknown command types are silently ignored."""
        command = CommandsRegistry.get_by_type(ctype)
        if command is not None:
            command.execute(value)
        self._read_head()

    @classmethod
    def start(cls, host, port):
        """Blocking entry point: connect and run the IOLoop until closed."""
        flash = cls(close_callback=IOLoop.instance().stop)
        flash.connect(host, port)
        signal.signal(signal.SIGINT, flash.close)
        IOLoop.instance().start()
        IOLoop.instance().close()
def connect():
    """Open a test connection to the RTSP camera and arm a timeout that
    invokes on_connect with _timeout=True if it never completes."""
    logging.debug('testing rtsp netcam at %s' % url)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    s.settimeout(settings.MJPG_CLIENT_TIMEOUT)
    stream = IOStream(s)
    stream.set_close_callback(on_close)
    stream.connect((data['host'], int(data['port'])), on_connect)
    # timeout[0] is a one-element list so the enclosing scope can cancel it.
    timeout[0] = io_loop.add_timeout(
        datetime.timedelta(seconds=settings.MJPG_CLIENT_TIMEOUT),
        functools.partial(on_connect, _timeout=True))
    return stream
def test_connection_refused(self):
    # When a connection is refused, the connect callback should not
    # be run. (The kqueue IOLoop used to behave differently from the
    # epoll IOLoop in this respect)
    port = get_unused_port()
    stream = IOStream(socket.socket(), self.io_loop)
    self.connect_called = False

    def connect_callback():
        self.connect_called = True

    # The close callback (not the connect callback) is what unblocks wait().
    stream.set_close_callback(self.stop)
    stream.connect(("localhost", port), connect_callback)
    self.wait()
    self.assertFalse(self.connect_called)
class _HTTPConnection(simple_httpclient._HTTPConnection):
    """simple_httpclient connection subclass that adds support for binding
    the outgoing socket to a specific source address."""

    def __init__(self, *args, **kwargs):
        # Pop our extra kwarg before delegating so the base class does not
        # see an unexpected argument.
        self.source_address = kwargs.pop('source_address', None)
        super(_HTTPConnection, self).__init__(*args, **kwargs)

    def _on_resolve(self, addrinfo):
        af, sockaddr = addrinfo[0]
        if self.parsed.scheme == "https":
            # Build ssl_options from the request's certificate settings.
            ssl_options = {}
            if self.request.validate_cert:
                ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
            if self.request.ca_certs is not None:
                ssl_options["ca_certs"] = self.request.ca_certs
            else:
                ssl_options["ca_certs"] = simple_httpclient._DEFAULT_CA_CERTS
            if self.request.client_key is not None:
                ssl_options["keyfile"] = self.request.client_key
            if self.request.client_cert is not None:
                ssl_options["certfile"] = self.request.client_cert
            if sys.version_info >= (2, 7):
                ssl_options["ciphers"] = "DEFAULT:!SSLv2"
            else:
                # This is really only necessary for pre-1.0 versions
                # of openssl, but python 2.6 doesn't expose version
                # information.
                ssl_options["ssl_version"] = ssl.PROTOCOL_SSLv3
            self.stream = SSLIOStream(socket.socket(af),
                                      io_loop=self.io_loop,
                                      ssl_options=ssl_options,
                                      max_buffer_size=self.max_buffer_size)
        else:
            self.stream = IOStream(socket.socket(af),
                                   io_loop=self.io_loop,
                                   max_buffer_size=self.max_buffer_size)
        if self.source_address:
            # Bind before connect so the OS uses the requested source address.
            self.stream.socket.bind(self.source_address)
        timeout = min(self.request.connect_timeout,
                      self.request.request_timeout)
        if timeout:
            self._timeout = self.io_loop.add_timeout(
                self.start_time + timeout,
                stack_context.wrap(self._on_timeout))
        self.stream.set_close_callback(self._on_close)
        # ipv6 addresses are broken (in self.parsed.hostname) until
        # 2.7, here is correctly parsed value calculated in __init__
        self.stream.connect(sockaddr, self._on_connect,
                            server_hostname=self.parsed_hostname)
class RemoteUpstream(Upstream):
    """ Most methods are the same as in LocalUpstream, but they may need to
    be different in the future.
    """

    def initialize(self):
        self.socket = socket.socket(self._address_type, socket.SOCK_STREAM)
        self.stream = IOStream(self.socket)
        self.stream.set_close_callback(self.on_close)

    def do_connect(self):
        self.stream.connect(self.dest, self.on_connect)

    @property
    def address(self):
        # Local (source) address of the upstream socket.
        return self.socket.getsockname()

    @property
    def address_type(self):
        return self._address_type

    def on_connect(self):
        self.connection_callback(self)
        # Stream everything through on_streaming_data; the final chunk
        # arrives via on_finish with finished=True.
        on_finish = functools.partial(self.on_streaming_data, finished=True)
        self.stream.read_until_close(on_finish, self.on_streaming_data)

    def on_close(self):
        # Distinguish error closes from clean ones for the owner's callbacks.
        if self.stream.error:
            self.error_callback(self, self.stream.error)
        else:
            self.close_callback(self)

    def on_streaming_data(self, data, finished=False):
        # NOTE(review): unlike the sibling implementation, `finished` is not
        # forwarded to streaming_callback here -- confirm this is intended.
        if len(data):
            self.streaming_callback(self, data)

    def do_write(self, data):
        try:
            self.stream.write(data)
        except IOError as e:
            self.close()

    def do_close(self):
        if self.socket:
            logger.info("close upstream: %s:%s" % self.address)
            self.stream.close()
def test_gaierror(self):
    # Test that IOStream sets its exc_info on getaddrinfo error.
    # It's difficult to reliably trigger a getaddrinfo error;
    # some resolvers won't even return errors for malformed names,
    # so we mock it instead. If IOStream changes to call a Resolver
    # before sock.connect, the mock target will need to change too.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    stream = IOStream(s, io_loop=self.io_loop)
    stream.set_close_callback(self.stop)
    with mock.patch('socket.socket.connect',
                    side_effect=socket.gaierror('boom')):
        with ExpectLog(gen_log, "Connect error"):
            stream.connect(('localhost', 80), callback=self.stop)
            self.wait()
    self.assertIsInstance(stream.error, socket.gaierror)
def test_gaierror(self):
    # Test that IOStream sets its exc_info on getaddrinfo error.
    # It's difficult to reliably trigger a getaddrinfo error;
    # some resolvers won't even return errors for malformed names,
    # so we mock it instead. If IOStream changes to call a Resolver
    # before sock.connect, the mock target will need to change too.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    stream = IOStream(s)
    stream.set_close_callback(self.stop)
    with mock.patch('socket.socket.connect',
                    side_effect=socket.gaierror(errno.EIO, 'boom')):
        with ExpectLog(gen_log, "Connect error"):
            stream.connect(('localhost', 80), callback=self.stop)
            self.wait()
    self.assertIsInstance(stream.error, socket.gaierror)
class RemoteUpstream(Upstream):
    """ Most methods are the same as in LocalUpstream, but they may need to
    be different in the future.
    """

    def initialize(self):
        self.socket = socket.socket(self._address_type, socket.SOCK_STREAM)
        self.stream = IOStream(self.socket)
        self.stream.set_close_callback(self.on_close)

    def do_connect(self):
        self.stream.connect(self.dest, self.on_connect)

    @property
    def address(self):
        # Local (source) address of the upstream socket.
        return self.socket.getsockname()

    @property
    def address_type(self):
        return self._address_type

    def on_connect(self):
        self.connection_callback(self)
        # Stream everything through on_streaming_data; the final chunk
        # arrives via on_finish with finished=True.
        on_finish = functools.partial(self.on_streaming_data, finished=True)
        self.stream.read_until_close(on_finish, self.on_streaming_data)

    def on_close(self):
        # Distinguish error closes from clean ones for the owner's callbacks.
        if self.stream.error:
            self.error_callback(self, self.stream.error)
        else:
            self.close_callback(self)

    def on_streaming_data(self, data, finished=False):
        if len(data):
            self.streaming_callback(self, data, finished)

    def do_write(self, data):
        try:
            self.stream.write(data)
        except IOError as e:
            self.close()

    def do_close(self):
        if self.socket:
            logger.debug("close upstream: %s" % self.socket)
            self.stream.close()
class UnixSocketTest(AsyncTestCase):
    """HTTPServers can listen on Unix sockets too.

    Why would you want to do this? Nginx can proxy to backends listening
    on unix sockets, for one thing (and managing a namespace for unix
    sockets can be easier than managing a bunch of TCP port numbers).

    Unfortunately, there's no way to specify a unix socket in a url for
    an HTTP client, so we have to test this by hand.
    """

    def setUp(self):
        super(UnixSocketTest, self).setUp()
        self.tmpdir = tempfile.mkdtemp()
        self.sockfile = os.path.join(self.tmpdir, "test.sock")
        sock = netutil.bind_unix_socket(self.sockfile)
        app = Application([("/hello", HelloWorldRequestHandler)])
        self.server = HTTPServer(app, io_loop=self.io_loop)
        self.server.add_socket(sock)
        self.stream = IOStream(socket.socket(socket.AF_UNIX),
                               io_loop=self.io_loop)
        self.stream.connect(self.sockfile, self.stop)
        self.wait()

    def tearDown(self):
        self.stream.close()
        self.server.stop()
        shutil.rmtree(self.tmpdir)
        super(UnixSocketTest, self).tearDown()

    def test_unix_socket(self):
        # Full request/response cycle over the unix socket.
        self.stream.write(b"GET /hello HTTP/1.0\r\n\r\n")
        self.stream.read_until(b"\r\n", self.stop)
        response = self.wait()
        self.assertEqual(response, b"HTTP/1.0 200 OK\r\n")
        self.stream.read_until(b"\r\n\r\n", self.stop)
        headers = HTTPHeaders.parse(self.wait().decode('latin1'))
        self.stream.read_bytes(int(headers["Content-Length"]), self.stop)
        body = self.wait()
        self.assertEqual(body, b"Hello world")

    def test_unix_socket_bad_request(self):
        # Unix sockets don't have remote addresses so they just return an
        # empty string.
        with ExpectLog(gen_log, "Malformed HTTP message from"):
            self.stream.write(b"garbage\r\n\r\n")
            self.stream.read_until_close(self.stop)
            response = self.wait()
        self.assertEqual(response, b"")
def _create_stream(self, max_buffer_size, af, addr):
    # Always connect in plaintext; we'll convert to ssl if necessary
    # after one connection has completed.
    stream = IOStream(socket.socket(af),
                      io_loop=self.io_loop,
                      max_buffer_size=max_buffer_size)
    # Returns the Future produced by IOStream.connect.
    return stream.connect(addr)
def test_100_continue(self):
    # Run through a 100-continue interaction by hand:
    # When given Expect: 100-continue, we get a 100 response after the
    # headers, and then the real response after the body.
    stream = IOStream(socket.socket())
    yield stream.connect(("127.0.0.1", self.get_http_port()))
    yield stream.write(
        b"\r\n".join(
            [
                b"POST /hello HTTP/1.1",
                b"Content-Length: 1024",
                b"Expect: 100-continue",
                b"Connection: close",
                b"\r\n",
            ]
        )
    )
    data = yield stream.read_until(b"\r\n\r\n")
    self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
    # Now send the promised body; the real response follows it.
    stream.write(b"a" * 1024)
    first_line = yield stream.read_until(b"\r\n")
    self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
    header_data = yield stream.read_until(b"\r\n\r\n")
    headers = HTTPHeaders.parse(native_str(header_data.decode("latin1")))
    body = yield stream.read_bytes(int(headers["Content-Length"]))
    self.assertEqual(body, b"Got 1024 bytes in POST")
    stream.close()
def _connectGS(self, gs):
    """Open a client transport to the game-server address ``gs`` and
    register it in the connection pool under a fresh transport id."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    cstream = IOStream(s)
    cstream.connect(gs)
    # todo: record the gs id
    # todo: remove the need for common, gamefuns and similar helpers
    # todo: rework clearOnline
    # todo: send broadcast messages on a timer
    self._lastid += 1
    transid = self._lastid
    ctransport = CTransport(cstream, transid, gs, self.from_gs_receive,
                            self.gs_close, **self.kwargs)
    self._connpool[transid] = ctransport
    self._poolsize = len(self._connpool)
def test_indexing_line(self):
    # NOTE(review): this snippet is truncated/redacted -- "*****" masks the
    # URL credentials and the code between the fetch and the assertions has
    # been lost, so the line below is not valid Python as-is. Restore from
    # the original source before using this test.
    client = AsyncHTTPClient(io_loop=self.io_loop)
    ping = yield client.fetch("http://*****:*****@version'], 1)
    self.assertEqual(doc['message'], "My name is Yuri and I'm 6 years old.")
def test_handle_stream_coroutine_logging(self):
    # handle_stream may be a coroutine and any exception in its
    # Future will be logged.
    class TestServer(TCPServer):
        @gen.coroutine
        def handle_stream(self, stream, address):
            yield gen.moment
            stream.close()
            # Deliberate error: must surface in the application log.
            1 / 0

    server = client = None
    try:
        sock, port = bind_unused_port()
        with NullContext():
            server = TestServer()
            server.add_socket(sock)
        client = IOStream(socket.socket())
        with ExpectLog(app_log, "Exception in callback"):
            yield client.connect(('localhost', port))
            yield client.read_until_close()
            # Extra turn of the loop so the logged exception is processed.
            yield gen.moment
    finally:
        if server is not None:
            server.stop()
        if client is not None:
            client.close()
def test_message_response(self):
    """A status request written to the server yields the expected 4-byte
    acknowledgment."""
    server = client = None
    try:
        sock, port = bind_unused_port()
        sock2, port2 = bind_unused_port()
        with NullContext():
            server = StatusServer()
            notify_server = NotifyServer()
            notify_server.add_socket(sock2)
            # The status server forwards through the notify server.
            server.notify_server = notify_server
            server.add_socket(sock)
        client = IOStream(socket.socket())
        yield client.connect(('localhost', port))
        yield client.write(msg1)
        results = yield client.read_bytes(4)
        assert results == b'\x11\x00\x01\x10'
    finally:
        if server is not None:
            server.stop()
        if client is not None:
            client.close()
def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
                   source_port=None):
    # Always connect in plaintext; we'll convert to ssl if necessary
    # after one connection has completed.
    source_port_bind = source_port if isinstance(source_port, int) else 0
    source_ip_bind = source_ip
    if source_port_bind and not source_ip:
        # User required a specific port, but did not specify
        # a certain source IP, will bind to the default loopback.
        source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1'
        # Trying to use the same address family as the requested af socket:
        # - 127.0.0.1 for IPv4
        # - ::1 for IPv6
    socket_obj = socket.socket(af)
    set_close_exec(socket_obj.fileno())
    if source_port_bind or source_ip_bind:
        # If the user requires binding also to a specific IP/port.
        try:
            socket_obj.bind((source_ip_bind, source_port_bind))
        except socket.error:
            socket_obj.close()
            # Fail loudly if unable to use the IP/port.
            raise
    try:
        stream = IOStream(socket_obj, max_buffer_size=max_buffer_size)
    except socket.error as e:
        # NOTE(review): the error path returns just a Future while the
        # success path returns (stream, future) -- callers must handle both.
        fu = Future()
        fu.set_exception(e)
        return fu
    else:
        return stream, stream.connect(addr)
def connect():
    # Probe the RTSP netcam; retried with credentials when the first
    # anonymous attempt was rejected (send_auth flag set by caller).
    # Use lazy %-args so the message is only formatted when DEBUG
    # logging is actually enabled.
    if send_auth[0]:
        logging.debug('testing rtsp netcam at %s (this time with credentials)', url)
    else:
        logging.debug('testing rtsp netcam at %s', url)

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    # NOTE(review): IOStream switches the socket to non-blocking mode,
    # so this settimeout likely only matters before wrapping — confirm.
    s.settimeout(settings.MJPG_CLIENT_TIMEOUT)
    stream = IOStream(s)
    stream.set_close_callback(on_close)
    stream.connect((host, int(port)), on_connect)

    # Belt-and-braces timeout: fire on_connect with _timeout=True if
    # the connection doesn't complete in time.
    timeout[0] = io_loop.add_timeout(
        datetime.timedelta(seconds=settings.MJPG_CLIENT_TIMEOUT),
        functools.partial(on_connect, _timeout=True))

    return stream
def test_connection_refused(self):
    # A refused connection must close the stream without ever invoking
    # the connect callback.  (The kqueue IOLoop used to behave
    # differently from the epoll IOLoop in this respect.)
    self.connect_called = False

    def on_connect():
        self.connect_called = True

    refused_port = get_unused_port()
    stream = IOStream(socket.socket(), self.io_loop)
    stream.set_close_callback(self.stop)
    stream.connect(("localhost", refused_port), on_connect)
    self.wait()

    self.assertFalse(self.connect_called)
    self.assertIsInstance(stream.error, socket.error, stream.error)
    if sys.platform != 'cygwin':
        # cygwin's errnos don't match those used on native windows python
        self.assertEqual(stream.error.args[0], errno.ECONNREFUSED)
def _connect(self): host = self._env.config.host port = self._env.config.port s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) stream = IOStream(s) def callback(): print 'connect ok, begin to recv data' self._recvData(stream) self._recvFromRelay(stream) def closeCallback(): print 'connection was closed!' stream.set_close_callback(closeCallback) stream.connect((host, port), callback)
def test_unix_socket(self):
    # Serve the hello-world app over a unix-domain socket and speak
    # raw HTTP/1.0 to it through an IOStream.
    path = os.path.join(self.tmpdir, "test.sock")
    listener = netutil.bind_unix_socket(path)
    server = HTTPServer(Application([("/hello", HelloWorldRequestHandler)]),
                        io_loop=self.io_loop)
    server.add_socket(listener)

    stream = IOStream(socket.socket(socket.AF_UNIX), io_loop=self.io_loop)
    stream.connect(path, self.stop)
    self.wait()

    stream.write(b("GET /hello HTTP/1.0\r\n\r\n"))

    # Status line.
    stream.read_until(b("\r\n"), self.stop)
    status_line = self.wait()
    self.assertEqual(status_line, b("HTTP/1.0 200 OK\r\n"))

    # Headers, then the Content-Length-delimited body.
    stream.read_until(b("\r\n\r\n"), self.stop)
    headers = HTTPHeaders.parse(self.wait().decode('latin1'))
    stream.read_bytes(int(headers["Content-Length"]), self.stop)
    self.assertEqual(self.wait(), b("Hello world"))
def make_iostream_pair(self):
    """Return [server_stream, client_stream] connected over loopback."""
    port = get_unused_port()
    [listener] = netutil.bind_sockets(port, '127.0.0.1',
                                      family=socket.AF_INET)
    pair = [None, None]

    def on_accept(connection, address):
        pair[0] = IOStream(connection, io_loop=self.io_loop)
        self.stop()

    def on_connect():
        pair[1] = client_stream
        self.stop()

    netutil.add_accept_handler(listener, on_accept,
                               io_loop=self.io_loop)
    client_stream = IOStream(socket.socket(), io_loop=self.io_loop)
    client_stream.connect(('127.0.0.1', port), callback=on_connect)
    # Wait until both the accept side and the connect side completed.
    self.wait(condition=lambda: all(pair))
    return pair
class DBusConnection:
    """A D-Bus connection over a Tornado ``IOStream``.

    Connects to ``bus_addr``, performs the SASL EXTERNAL handshake,
    then parses incoming wire data into messages and dispatches them
    through a ``Router``.
    """

    def __init__(self, bus_addr):
        self.auth_parser = SASLParser()
        self.parser = Parser()
        self.router = Router(Future)
        # Resolved with True once the SASL handshake succeeds, or with
        # an AuthenticationError on failure.
        self.authentication = Future()
        self.unique_name = None

        self._sock = socket.socket(family=socket.AF_UNIX)
        self.stream = IOStream(self._sock, read_chunk_size=4096)

        def connected():
            # The handshake starts with a single NUL byte followed by
            # the AUTH EXTERNAL command.
            self.stream.write(b'\0' + make_auth_external())

        self.stream.connect(bus_addr, connected)
        self.stream.read_until_close(streaming_callback=self.data_received)

    def _authenticated(self):
        # Handshake done: switch the server to the message stream, then
        # replay any bytes the SASL parser buffered past the handshake.
        self.stream.write(BEGIN)
        self.authentication.set_result(True)
        self.data_received_post_auth(self.auth_parser.buffer)

    def data_received(self, data):
        # Before authentication completes, bytes feed the SASL parser;
        # afterwards they are regular D-Bus messages.
        if self.authentication.done():
            return self.data_received_post_auth(data)

        self.auth_parser.feed(data)
        if self.auth_parser.authenticated:
            self._authenticated()
        elif self.auth_parser.error:
            self.authentication.set_exception(AuthenticationError(self.auth_parser.error))

    def data_received_post_auth(self, data):
        # Parse raw bytes and route every complete message.
        for msg in self.parser.feed(data):
            self.router.incoming(msg)

    def send_message(self, message):
        """Serialise *message*, write it, and return the router's
        future for the eventual reply.

        Raises ``RuntimeError`` if called before authentication
        finished.
        """
        if not self.authentication.done():
            raise RuntimeError("Wait for authentication before sending messages")

        future = self.router.outgoing(message)
        data = message.serialise()
        self.stream.write(data)
        return future
class DecoratorCapClient(BaseCapClient):
    """Cap client whose ``capitalize`` is adapted via ``@return_future``."""

    @return_future
    def capitalize(self, request_data, callback):
        logging.info("capitalize")
        self.request_data = request_data
        self.callback = callback
        self.stream = IOStream(socket.socket())
        self.stream.connect(('127.0.0.1', self.port),
                            callback=self.handle_connect)

    def handle_connect(self):
        logging.info("handle_connect")
        payload = utf8(self.request_data + "\n")
        self.stream.write(payload)
        self.stream.read_until(b'\n', callback=self.handle_read)

    def handle_read(self, data):
        logging.info("handle_read")
        self.stream.close()
        self.callback(self.process_response(data))
def _create_stream(self, max_buffer_size, af, addr):
    # Plaintext connect; TLS (if any) is layered on after the first
    # connection has completed.
    sock = socket.socket(af)
    log.debug("connect:%s" % af)
    # Bind the outgoing side to the configured local IP, ephemeral port.
    sock.bind((config.bind_ip, 0))
    stream = IOStream(sock, io_loop=self.io_loop,
                      max_buffer_size=max_buffer_size)
    return stream.connect(addr)
class tcp_gateway(object):
    """TCP message gateway server; the underlying iostream defaults to
    a ~100MB buffer."""

    def __init__(self, in_port, callback_entrance):
        super(tcp_gateway, self).__init__()
        # NOTE: the tcp server here could also take in/out buffer-size
        # settings.
        self.__tcp_listener = tcp_listener(callback_entrance)
        self.__tcp_listener.bind(in_port)
        self.__tcp_listener.start()

    def __sending_action(self):
        # Close the stream once the write completes.
        self.__sending_stream.write(self.__message, self.__clear_up)

    def __clear_up(self):
        self.__sending_stream.close()
        self.__sending_stream = None
        self.__message = None

    def __send_string(self, target_ip, target_port, string_data):
        self.sending_socket = socket.socket(socket.AF_INET,
                                            socket.SOCK_STREAM, 0)
        self.__message = string_data
        self.__sending_stream = IOStream(self.sending_socket)
        self.__sending_stream.connect((target_ip, target_port),
                                      self.__sending_action)

    def send_json_dict(self, target_ip, target_port, dict,
                       timeout_seconds=None, timeout_action=None):
        """Send a JSON structure (values may only be dict/map or list);
        if no reply arrives within timeout_seconds, timeout_action is
        meant to run."""
        self.__send_string(target_ip, target_port, json.dumps(dict))
def capitalize(self, request_data):
    """Round-trip ``request_data`` through the local cap server and
    return the processed response (coroutine style)."""
    logging.debug("capitalize")
    stream = IOStream(socket.socket())
    logging.debug("connecting")
    # BUGFIX: connect to the loopback address where the test cap
    # server listens (consistent with ManualCapClient and the other
    # coroutine client); "10.0.0.7" is an unrelated LAN address.
    yield stream.connect(("127.0.0.1", self.port))
    stream.write(utf8(request_data + "\n"))
    logging.debug("reading")
    data = yield stream.read_until(b"\n")
    logging.debug("returning")
    stream.close()
    raise gen.Return(self.process_response(data))
def capitalize(self, request_data):
    """Send ``request_data`` to the local cap server and yield back the
    processed reply."""
    logging.debug('capitalize')
    conn = IOStream(socket.socket())
    logging.debug('connecting')
    yield conn.connect(('127.0.0.1', self.port))
    conn.write(utf8(request_data + '\n'))
    logging.debug('reading')
    line = yield conn.read_until(b'\n')
    logging.debug('returning')
    conn.close()
    raise gen.Return(self.process_response(line))
class TokenizerService(object):
    '''
    Wraps the IPC to the Java TokenizerService (which runs tokenization
    and named entity extraction through CoreNLP)
    '''

    def __init__(self):
        self._socket = IOStream(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
        # Pending requests: request id -> Future resolved with a
        # TokenizerResult when the matching response arrives.
        self._requests = dict()
        self._next_id = 0

    @tornado.gen.coroutine
    def run(self):
        """Connect to the tokenizer and dispatch responses forever.

        Reads newline-delimited JSON responses, resolves the matching
        pending future, and returns when the stream is closed.
        """
        yield self._socket.connect(('127.0.0.1', PORT))
        while True:
            try:
                response = yield self._socket.read_until(b'\n')
            except StreamClosedError:
                response = None
            if not response:
                # Connection closed.  NOTE(review): any still-pending
                # futures in self._requests are left unresolved here.
                return
            response = json.loads(str(response, encoding='utf-8'))
            # 'req' echoes the id sent in tokenize(); use it to find
            # the caller's future.
            id = int(response['req'])
            result = TokenizerResult(tokens=list(clean_tokens(response['tokens'])),
                                     values=response['values'],
                                     constituency_parse=response['constituencyParse'],
                                     pos_tags=response['pos'],
                                     raw_tokens=response['rawTokens'],
                                     sentiment=response['sentiment'])
            self._requests[id].set_result(result)
            del self._requests[id]

    def tokenize(self, language_tag, query, expect=None):
        """Queue *query* for tokenization; returns a Future that run()
        will resolve with a TokenizerResult."""
        id = self._next_id
        self._next_id += 1

        req = dict(req=id, utterance=query, languageTag=language_tag)
        if expect is not None:
            req['expect'] = expect

        outer = Future()
        self._requests[id] = outer

        def then(future):
            # If the write failed, fail the caller's future and drop
            # the pending entry so it does not leak.
            if future.exception():
                outer.set_exception(future.exception())
                del self._requests[id]

        future = self._socket.write(json.dumps(req).encode())
        future.add_done_callback(then)
        return outer
def _create_stream(self, max_buffer_size, af, addr):
    # Always connect in plaintext; we'll convert to ssl if necessary
    # after one connection has completed.
    try:
        plain = IOStream(socket.socket(af),
                         io_loop=self.io_loop,
                         max_buffer_size=max_buffer_size)
    except socket.error as exc:
        # Surface socket-creation failures as an already-failed future
        # rather than raising synchronously.
        failed = Future()
        failed.set_exception(exc)
        return failed
    return plain.connect(addr)
def _create_stream(self, host, ssl_options, max_buffer_size, af, addr):
    # TODO: we should connect in plaintext mode and start the
    # ssl handshake only after stopping the _Connector.
    shared_kwargs = dict(io_loop=self.io_loop,
                         max_buffer_size=max_buffer_size)
    if ssl_options is None:
        stream = IOStream(socket.socket(af), **shared_kwargs)
    else:
        stream = SSLIOStream(socket.socket(af),
                             ssl_options=ssl_options,
                             **shared_kwargs)
    return stream.connect(addr, server_hostname=host)
def test_timeout(self):
    """A body that never arrives must trigger the server's
    body_timeout and close the connection with no response."""
    stream = IOStream(socket.socket())
    try:
        # BUGFIX: the test server listens locally on get_http_port();
        # "10.0.0.7" is an unrelated LAN address that cannot reach it.
        yield stream.connect(("127.0.0.1", self.get_http_port()))
        # Use a raw stream because AsyncHTTPClient won't let us read a
        # response without finishing a body.
        stream.write(b"PUT /streaming?body_timeout=0.1 HTTP/1.0\r\n"
                     b"Content-Length: 42\r\n\r\n")
        with ExpectLog(gen_log, "Timeout reading body"):
            response = yield stream.read_until_close()
        # The server gives up without sending anything back.
        self.assertEqual(response, b"")
    finally:
        stream.close()
class AsyncSocketHanlder(SocketHandler):
    """Non-blocking variant of logging's SocketHandler that writes log
    records through a Tornado IOStream.

    NOTE(review): the class name misspells "Handler"; kept as-is since
    callers reference it by this name.
    """
    def __init__(self, host, port, ioloop=None):
        """Remember the target address; the stream is created lazily
        on first send."""
        super(AsyncSocketHanlder, self).__init__(host, port)
        self._ioloop = ioloop
        self._stream = None

    def makeSocket(self, timeout=1):
        """Create the socket, wrap it in an IOStream (connect starts
        asynchronously), and return the raw socket for the base class.

        NOTE(review): ``timeout`` is accepted for SocketHandler
        compatibility but not applied here — confirm intended.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        self._stream = IOStream(s)
        self._stream.connect((self.host, self.port))
        return s

    def send(self, s):
        """Write the already-serialised record bytes to the stream,
        creating the connection on first use."""
        print s
        if self._stream is None:
            self.createSocket()
        self._stream.write(s)
def test_gaierror(self):
    # IOStream should record getaddrinfo failures in its ``error``
    # attribute.  Real resolver errors are hard to trigger reliably
    # (some resolvers won't even reject malformed names), so
    # socket.connect is mocked instead.  If IOStream ever resolves via
    # a Resolver before sock.connect, the mock target must change too.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
    stream = IOStream(sock)
    stream.set_close_callback(self.stop)
    boom = socket.gaierror(errno.EIO, "boom")
    with mock.patch("socket.socket.connect", side_effect=boom):
        with self.assertRaises(StreamClosedError):
            yield stream.connect(("10.0.0.7", 80))
        self.assertIsInstance(stream.error, socket.gaierror)