def _run_traffic_jam(nsends, nbytes):
    """Flood a BatchedSend with `nsends` messages of `nbytes` each and
    verify every message arrives exactly once and in order.

    This test eats `nsends * nbytes` bytes in RAM.
    """
    np = pytest.importorskip('numpy')
    from distributed.protocol import to_serialize
    data = bytes(np.random.randint(0, 255, size=(nbytes,)).astype('u1').data)
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=0.01)
        b.start(stream)

        msg = {'x': to_serialize(data)}
        for i in range(nsends):
            b.send(assoc(msg, 'i', i))
            # Randomly yield control so batches form at varying sizes.
            if np.random.random() > 0.5:
                yield gen.sleep(0.001)

        results = []
        count = 0
        while len(results) < nsends:
            # If this times out then I think it's a backpressure issue.
            # Somehow we're able to flood the socket so that the receiving
            # end loses some of our messages.
            L = yield gen.with_timeout(timedelta(seconds=5), read(stream))
            count += 1
            results.extend(r['i'] for r in L)

        # Every batch sent must correspond to one batch read back.
        assert count == b.batch_count == e.count
        assert b.message_count == nsends

        # Messages must arrive in the order they were sent.
        assert results == list(range(nsends))

        stream.close()  # external closing
        yield b.close(ignore_closed=True)
def test_stress():
    """Stress-test BatchedSend with concurrent producer/consumer coroutines:
    10000 integers must arrive complete and in order."""
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)
        L = []

        @gen.coroutine
        def send():
            b = BatchedSend(interval=3)
            b.start(stream)
            for i in range(0, 10000, 2):
                b.send(i)
                b.send(i + 1)
                # Tiny random sleeps vary the batch boundaries.
                yield gen.sleep(0.00001 * random.randint(1, 10))

        @gen.coroutine
        def recv():
            while True:
                result = yield gen.with_timeout(timedelta(seconds=1),
                                                read(stream))
                print(result)
                L.extend(result)
                # 9999 is the last value the producer sends.
                if result[-1] == 9999:
                    break

        yield All([send(), recv()])

        assert L == list(range(0, 10000, 1))

        stream.close()
def connect(self, address, deserialize=True, **connection_args):
    """Connect to `address` ('host:port') and return a comm object
    (self.comm_class) wrapping the connected stream.

    Raises whatever convert_stream_closed_error translates a
    StreamClosedError into when the socket connect fails.
    """
    self._check_encryption(address, connection_args)
    ip, port = parse_host_port(address)
    kwargs = self._get_connect_args(**connection_args)

    client = TCPClient()
    try:
        stream = yield client.connect(ip, port,
                                      max_buffer_size=MAX_BUFFER_SIZE,
                                      **kwargs)

        # Under certain circumstances tornado will have a closed connection
        # with an error and not raise a StreamClosedError.
        #
        # This occurs with tornado 5.x and openssl 1.1+
        if stream.closed() and stream.error:
            raise StreamClosedError(stream.error)

    except StreamClosedError as e:
        # The socket connect() call failed
        convert_stream_closed_error(self, e)

    local_address = self.prefix + get_stream_address(stream)
    raise gen.Return(self.comm_class(stream,
                                     local_address,
                                     self.prefix + address,
                                     deserialize))
def whois_async(query, fields=None):
    """
    Perform whois request

    :param query: domain name (FQDN) or raw whois query string
    :param fields: optional iterable of field names; when given, the parsed
        response is filtered down to those fields
    :return: list of (field, value) pairs, or None when the whois host
        cannot be resolved
    """
    logger.debug("whois %s", query)
    # Get appropriate whois server
    if is_fqdn(query):
        # Use TLD.whois-servers.net for domain lookup
        tld = query.split(".")[-1]
        server = "%s.whois-servers.net" % tld
    else:
        server = DEFAULT_WHOIS_SERVER
    # Perform query
    try:
        client = TCPClient()
        stream = yield client.connect(server, DEFAULT_WHOIS_PORT)
    except IOError as e:
        logger.error("Cannot resolve host '%s': %s", server, e)
        raise tornado.gen.Return()
    try:
        yield stream.write(str(query) + "\r\n")
        data = yield stream.read_until_close()
    finally:
        # BUG FIX: IOStream.close() returns None, and yielding None from a
        # tornado coroutine raises BadYieldError — close synchronously.
        stream.close()
    data = parse_response(data)
    if fields:
        data = [(k, v) for k, v in data if k in fields]
    raise tornado.gen.Return(data)
def _run_traffic_jam(nsends, nbytes):
    """Flood a BatchedSend with `nsends` payloads of `nbytes` each and
    assert all arrive, in order, with matching batch accounting.

    This test eats `nsends * nbytes` bytes in RAM.
    """
    np = pytest.importorskip('numpy')
    from distributed.protocol import to_serialize
    data = bytes(np.random.randint(0, 255, size=(nbytes, )).astype('u1').data)
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=0.01)
        b.start(stream)

        msg = {'x': to_serialize(data)}
        for i in range(nsends):
            b.send(assoc(msg, 'i', i))
            # Yield control roughly half the time to vary batch sizes.
            if np.random.random() > 0.5:
                yield gen.sleep(0.001)

        results = []
        count = 0
        while len(results) < nsends:
            # If this times out then I think it's a backpressure issue.
            # Somehow we're able to flood the socket so that the receiving
            # end loses some of our messages.
            L = yield gen.with_timeout(timedelta(seconds=5), read(stream))
            count += 1
            results.extend(r['i'] for r in L)

        assert count == b.batch_count == e.count
        assert b.message_count == nsends

        assert results == list(range(nsends))

        stream.close()  # external closing
        yield b.close(ignore_closed=True)
def connect(self):
    """Open the TCP stream to the database server, then run the initial
    handshake (server info + authentication).

    NOTE(review): the commented-out block below is the original blocking
    socket implementation this tornado version replaced; kept for reference.
    """
    client = TCPClient()
    self.stream = yield client.connect(self.host, self.port)
    # sock = None
    # try:
    #     if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
    #         sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    #         t = sock.gettimeout()
    #         sock.settimeout(self.connect_timeout)
    #         sock.connect(self.unix_socket)
    #         sock.settimeout(t)
    #         self.host_info = "Localhost via UNIX socket"
    #         if DEBUG: print('connected using unix_socket')
    #     else:
    #         while True:
    #             try:
    #                 sock = socket.create_connection(
    #                     (self.host, self.port), self.connect_timeout)
    #                 break
    #             except (OSError, IOError) as e:
    #                 if e.errno == errno.EINTR:
    #                     continue
    #                 raise
    #         self.host_info = "socket %s:%d" % (self.host, self.port)
    #         if DEBUG: print('connected using socket')
    #     sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    #     if self.no_delay:
    #         sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    #     self.socket = sock
    #     self._rfile = _makefile(sock, 'rb')
    yield self._get_server_information()
    yield self._request_authentication()
def connect(self):
    """Open the TCP stream (on self.io_loop) to the server, then perform
    the initial handshake (server info + authentication).

    NOTE(review): the commented-out block below is the original blocking
    socket implementation this tornado version replaced; kept for reference.
    """
    client = TCPClient(io_loop=self.io_loop)
    self.stream = yield client.connect(self.host, self.port)
    # sock = None
    # try:
    #     if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
    #         sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    #         t = sock.gettimeout()
    #         sock.settimeout(self.connect_timeout)
    #         sock.connect(self.unix_socket)
    #         sock.settimeout(t)
    #         self.host_info = "Localhost via UNIX socket"
    #         if DEBUG: print('connected using unix_socket')
    #     else:
    #         while True:
    #             try:
    #                 sock = socket.create_connection(
    #                     (self.host, self.port), self.connect_timeout)
    #                 break
    #             except (OSError, IOError) as e:
    #                 if e.errno == errno.EINTR:
    #                     continue
    #                 raise
    #         self.host_info = "socket %s:%d" % (self.host, self.port)
    #         if DEBUG: print('connected using socket')
    #     sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    #     if self.no_delay:
    #         sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    #     self.socket = sock
    #     self._rfile = _makefile(sock, 'rb')
    yield self._get_server_information()
    yield self._request_authentication()
def test_BatchedSend():
    """Messages sent within one interval are batched together; str/repr
    expose the buffer length and byte_count tracks payload size."""
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        # str/repr should reflect the current buffer length.
        assert str(len(b.buffer)) in str(b)
        assert str(len(b.buffer)) in repr(b)
        b.start(stream)
        yield gen.sleep(0.020)

        b.send('hello')
        b.send('hello')
        b.send('world')
        yield gen.sleep(0.020)
        b.send('HELLO')
        b.send('HELLO')

        result = yield read(stream)
        assert result == ['hello', 'hello', 'world']
        result = yield read(stream)
        assert result == ['HELLO', 'HELLO']

        assert b.byte_count > 1
def test_close_twice():
    """Closing a BatchedSend twice must be harmless (idempotent close)."""
    with echo_server() as server:
        tcp_client = TCPClient()
        comm = yield tcp_client.connect('127.0.0.1', server.port)

        batched = BatchedSend(interval=10)
        batched.start(comm)

        # Second close on an already-closed BatchedSend must not raise.
        yield batched.close()
        yield batched.close()
class CircleClient(object):
    """Synchronous-looking JSON-over-TCP client for the circled daemon.

    Each call opens a connection, writes one JSON command terminated by
    MSG_END, and reads responses until one matching the request id arrives.
    """

    def __init__(self, endpoint=DEFAULT_ENDPOINT_DEALER, timeout=5.0):
        # BUG FIX: self.endpoint was assigned twice with the same value;
        # the duplicate assignment has been removed.
        self.endpoint = endpoint
        self._id = cast_bytes(uuid.uuid4().hex)
        self.timeout = timeout
        self.stream = None
        self.client = TCPClient()

    def stop(self):
        """Release the underlying TCPClient."""
        self.client.close()

    def send_message(self, command, **props):
        """Build a command message and run it synchronously."""
        return self.call(make_message(command, **props))

    def call(self, cmd):
        """Run the coroutine `_call` to completion on the IOLoop and
        return its result."""
        result = IOLoop.instance().run_sync(lambda: self._call(cmd))
        return result

    @gen.coroutine
    def _call(self, cmd):
        """Send `cmd` (a mapping) and wait for the matching response.

        :raises CallError: on connect failure, timeout, or bad JSON.
        """
        if isinstance(cmd, basestring):
            raise DeprecationWarning('call() takes a mapping')
        # Tag the request so we can skip unrelated messages on the wire.
        call_id = uuid.uuid4().hex
        cmd['id'] = call_id
        host, port = self.endpoint.split(':')
        try:
            cmd = json.dumps(cmd)
            self.stream = yield gen_timeout(self.timeout,
                                            self.client.connect(host, port))
            yield gen_timeout(self.timeout,
                              self.stream.write(cmd + MSG_END))
        except StreamClosedError:
            raise CallError("Can't connect circled. Maybe it is closed.")
        except gen.TimeoutError:
            raise CallError('Connect timed out ({} seconds).'.format(
                self.timeout))
        except ValueError as e:
            raise CallError(str(e))
        while True:
            try:
                msg = yield gen_timeout(self.timeout,
                                        self.stream.read_until(MSG_END))
                msg = rstrip(msg, MSG_END)
                res = json.loads(msg)
                if 'id' in res and res['id'] not in (call_id, None):
                    # we got the wrong message
                    continue
                raise gen.Return(res)
            except gen.TimeoutError:
                raise CallError('Run timed out ({} seconds).'.format(
                    self.timeout))
            except ValueError as e:
                raise CallError(str(e))
def test_send_after_stream_finish():
    """A message sent after the previous batch flushed still goes through."""
    with echo_server() as server:
        tcp_client = TCPClient()
        comm = yield tcp_client.connect('127.0.0.1', server.port)

        batched = BatchedSend(interval=10)
        batched.start(comm)

        # Wait for the in-flight write to finish before sending again.
        yield batched.last_send
        batched.send('hello')

        received = yield read(comm)
        assert received == ['hello']
def test_send_before_start():
    """Messages queued before start() are delivered once the stream starts."""
    with echo_server() as server:
        tcp_client = TCPClient()
        comm = yield tcp_client.connect('127.0.0.1', server.port)

        batched = BatchedSend(interval=10)

        # Queue messages while the BatchedSend has no stream yet.
        batched.send('hello')
        batched.send('world')

        batched.start(comm)
        received = yield read(comm)
        assert received == ['hello', 'world']
def test_close_closed():
    """close(ignore_closed=True) must not raise when the underlying stream
    was already closed externally."""
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        b.start(stream)

        b.send(123)
        stream.close()  # external closing

        yield b.close(ignore_closed=True)
def test_send_after_stream_finish():
    """A message sent after the previous batch flushed still arrives."""
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        b.start(stream)
        # Wait until any in-flight write has completed.
        yield b.last_send
        b.send('hello')
        result = yield read(stream)
        assert result == ['hello']
def connect(self, host, port):
    """Connect to host:port, fire the CONNECT event on success, and
    return a (success, message) tuple via gen.Return.

    Note: connection failures are reported via the returned tuple, not
    raised to the caller.
    """
    self.host = host
    self.port = port
    client = TCPClient()
    try:
        self.stream = yield client.connect(self.host, self.port)
    except IOError as e:
        log.error("%s", repr(e))
        raise gen.Return((False, 'Failed to connect'))
    self.trigger(Event.CONNECT, self)
    raise gen.Return((True, "OK"))
def connect(ip, port, timeout=1):
    """Connect to ip:port, retrying on StreamClosedError for up to
    `timeout` seconds before giving up and re-raising."""
    client = TCPClient()
    deadline = time() + timeout
    while True:
        try:
            stream = yield client.connect(ip, port)
        except StreamClosedError:
            # Server may not be up yet; retry until the deadline passes.
            if time() >= deadline:
                raise
            yield gen.sleep(0.01)
            logger.debug("sleeping on connect")
        else:
            raise Return(stream)
def connect(self, address, deserialize=True):
    """Connect to `address` ('host:port') and return a TCP comm object
    wrapping the connected stream."""
    ip, port = parse_host_port(address)
    client = TCPClient()
    try:
        stream = yield client.connect(ip, port,
                                      max_buffer_size=MAX_BUFFER_SIZE)
    except StreamClosedError as e:
        # The socket connect() call failed
        convert_stream_closed_error(e)
    raise gen.Return(TCP(stream, 'tcp://' + address, deserialize))
class SafeTcpStream:
    """TCP stream wrapper that keeps reconnecting in the background and
    silently drops writes while disconnected."""

    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.stream = None
        self.tcp_client = TCPClient()
        # Fire-and-forget: the coroutine's future is intentionally
        # discarded; it loops forever reconnecting as needed.
        self.make_tcp_connection_loop()

    @gen.coroutine
    def make_tcp_connection_loop(self, once=False):
        # once=True: single connection attempt with ONCE_CONNECT_TIMEOUT;
        # once=False: endless loop, retrying every RECONNECT_INTERVAL.
        while True:
            if self.stream is None:
                try:
                    if once:
                        self.stream = yield gen.with_timeout(
                            ioloop.IOLoop.instance().time() + ONCE_CONNECT_TIMEOUT,
                            self.tcp_client.connect(self.host, self.port))
                    else:
                        self.stream = yield self.tcp_client.connect(
                            self.host, self.port)
                    if self.stream is not None:
                        # Reset self.stream to None on remote close so the
                        # loop reconnects on its next pass.
                        self.stream.set_close_callback(self.disconnected)
                except Exception:
                    # Best-effort: any connect failure is retried later.
                    pass
            if once:
                break
            yield gen.Task(ioloop.IOLoop.instance().add_timeout,
                           ioloop.IOLoop.instance().time() + RECONNECT_INTERVAL)

    def disconnected(self):
        # Close callback: drop the dead stream so the loop reconnects.
        try:
            self.stream.close()
        except Exception:
            pass
        self.stream = None

    def write(self, data):
        # Writes while disconnected are silently dropped (returns None).
        if self.stream is not None:
            return self.stream.write(data)
def Trans(): stream = yield TCPClient.connect('localhost', 8036) try: while True: DATA = raw_input("Enter your input: ") yield stream.write(str(DATA)) back = yield stream.read_bytes(20, partial=True) msg = yield stream.read_bytes(20, partial=True) print msg print back if DATA == 'over': break except iostream.StreamClosedError: pass
def start_app():
    # Python 2 entry loop: connect to the lantern controller at
    # 127.0.0.1:9999, then dispatch each incoming TLV command to a worker
    # thread. On any error, stop the IOLoop.
    tcpClient = TCPClient()
    try:
        stream = yield tcpClient.connect('127.0.0.1', 9999)
        print 'Connection started'
        app = LaternController(LanternDriver())
        client = TLVClient(stream)
        # Commands are handled off the IOLoop thread (up to 5 at a time).
        executer = ThreadPoolExecutor(max_workers=5)
        while True:
            command = yield client.getCommand()
            executer.submit(app.handle, command)
    except Exception as e:
        print 'Caught Error: %s' % e
        # Schedule shutdown from within the loop.
        IOLoop.instance().add_callback(IOLoop.instance().stop)
def test_BatchedStream_raises():
    """recv() and send() on a BatchedStream whose underlying stream is
    closed must raise StreamClosedError."""
    port = 3435
    server = MyServer()
    server.listen(port)

    client = TCPClient()
    stream = yield client.connect('127.0.0.1', port)

    b = BatchedStream(stream, interval=20)
    stream.close()

    with pytest.raises(StreamClosedError):
        yield b.recv()

    with pytest.raises(StreamClosedError):
        yield b.send('123')
def connect(ip, port, timeout=3):
    """Connect to ip:port, retrying on StreamClosedError for up to
    `timeout` seconds; each attempt is also bounded by `timeout` and a
    timed-out attempt raises IOError."""
    client = TCPClient()
    start = time()
    while True:
        future = client.connect(ip, port, max_buffer_size=MAX_BUFFER_SIZE)
        try:
            stream = yield gen.with_timeout(timedelta(seconds=timeout),
                                            future)
            raise Return(stream)
        except StreamClosedError:
            # Server may not be listening yet; retry until the deadline.
            if time() - start < timeout:
                yield gen.sleep(0.01)
                logger.debug("sleeping on connect")
            else:
                raise
        except gen.TimeoutError:
            raise IOError("Timed out while connecting to %s:%d" % (ip, port))
def connect(ip, port, timeout=3):
    """Connect to ip:port, retrying on StreamClosedError for up to
    `timeout` seconds; a single attempt exceeding `timeout` raises IOError."""
    client = TCPClient()
    deadline = time() + timeout
    while True:
        try:
            attempt = client.connect(ip, port,
                                     max_buffer_size=MAX_BUFFER_SIZE)
            stream = yield gen.with_timeout(timedelta(seconds=timeout),
                                            attempt)
        except StreamClosedError:
            # Connection refused/reset: retry until the deadline passes.
            if time() >= deadline:
                raise
            yield gen.sleep(0.01)
            logger.debug("sleeping on connect")
        except gen.TimeoutError:
            raise IOError("Timed out while connecting to %s:%d" % (ip, port))
        else:
            raise Return(stream)
def connect(self, address, deserialize=True, **connection_args):
    """Connect to `address` ('host:port') and return a comm object
    (self.comm_class) wrapping the connected stream."""
    self._check_encryption(address, connection_args)
    ip, port = parse_host_port(address)
    kwargs = self._get_connect_args(**connection_args)

    client = TCPClient()
    try:
        stream = yield client.connect(ip, port,
                                      max_buffer_size=MAX_BUFFER_SIZE,
                                      **kwargs)
    except StreamClosedError as e:
        # The socket connect() call failed
        convert_stream_closed_error(e)  # XXX

    raise gen.Return(
        self.comm_class(stream, self.prefix + address, deserialize))
def test_BatchedStream():
    """Round-trip two messages through a BatchedStream; each comes back
    twice, in order."""
    port = 3434
    server = MyServer()
    server.listen(port)

    tcp_client = TCPClient()
    comm = yield tcp_client.connect('127.0.0.1', port)

    batched = BatchedStream(comm, interval=20)
    batched.send('hello')
    batched.send('world')

    # The original asserted these four values one by one; a loop keeps
    # the same order and count.
    for expected in ('hello', 'hello', 'world', 'world'):
        received = yield batched.recv()
        assert received == expected

    batched.close()
def run_task(self, host):
    """Send self.content to host:self.port, read until the peer closes,
    and record the raw response in self.returns[host] (None on failure)."""
    client = TCPClient()
    LOG.debug("connecting to `%s:%s'" % (host, self.port))
    try:
        stream = yield client.connect(host, port=self.port)
        LOG.debug("sending query `%s' to `%s:%s'" % (
            self.content.encode('string-escape'), host, self.port))
        yield stream.write(self.content)
        ret = yield stream.read_until_close()
        # Log only a bounded, escaped prefix of the response.
        resp = str(ret).encode('string-escape')[:DEBUG_CONTENT_LENGTH]
        LOG.debug("`%s:%s' returns `%s'" % (host, self.port, resp))
        stream.close()
        del stream
        self.returns[host] = ret
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit; catch Exception so those propagate.
        LOG.warn("`%s:%s' return status unknown" % (host, self.port))
        self.returns[host] = None
    finally:
        client = None
def run_task(self, host):
    """Send self.content to host:self.port, read until the peer closes,
    and record the raw response in self.returns[host] (None on failure)."""
    client = TCPClient()
    LOG.debug("connecting to `%s:%s'" % (host, self.port))
    try:
        stream = yield client.connect(host, port=self.port)
        LOG.debug("sending query `%s' to `%s:%s'" %
                  (self.content.encode('string-escape'), host, self.port))
        yield stream.write(self.content)
        ret = yield stream.read_until_close()
        # Log only a bounded, escaped prefix of the response.
        resp = str(ret).encode('string-escape')[:DEBUG_CONTENT_LENGTH]
        LOG.debug("`%s:%s' returns `%s'" % (host, self.port, resp))
        stream.close()
        del stream
        self.returns[host] = ret
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit; catch Exception so those propagate.
        LOG.warn("`%s:%s' return status unknown" % (host, self.port))
        self.returns[host] = None
    finally:
        client = None
def connect(address, deserialize = True, **connection_args): ip, port = parse_host_port(address) #kwargs = self._get_connect_args(**connection_args) kwargs = {} # The method in Dask just returns {} as far as I can tell client = TCPClient() try: stream = yield client.connect(ip, port, max_buffer_size = MAX_BUFFER_SIZE, **kwargs) # Under certain circumstances tornado will have a closed connnection with an error and not raise # a StreamClosedError. # # This occurs with tornado 5.x and openssl 1.1+ if stream.closed() and stream.error: raise StreamClosedError(stream.error) except StreamClosedError as e: # The socket connect() call failed convert_stream_closed_error("Lambda", e) local_address = prefix + get_stream_address(stream) raise gen.Return(TCP(stream, local_address, prefix + address, deserialize))
def test_send_before_close():
    """A message sent immediately before close() is still delivered, and
    sending after close raises StreamClosedError."""
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        b.start(stream)

        cnt = int(e.count)
        b.send('hello')
        yield b.close()  # close immediately after sending
        assert not b.buffer

        # Wait (up to 5s) for the server to register the final message.
        start = time()
        while e.count != cnt + 1:
            yield gen.sleep(0.01)
            assert time() < start + 5

        with pytest.raises(StreamClosedError):
            b.send('123')
def test_BatchedSend():
    """Messages sent within one flush interval are delivered as one batch."""
    with echo_server() as server:
        tcp_client = TCPClient()
        comm = yield tcp_client.connect('127.0.0.1', server.port)

        batched = BatchedSend(interval=10)
        batched.start(comm)
        yield batched.last_send
        yield gen.sleep(0.020)

        batched.send('hello')
        batched.send('hello')
        batched.send('world')
        yield gen.sleep(0.020)
        batched.send('HELLO')
        batched.send('HELLO')

        first_batch = yield read(comm)
        assert first_batch == ['hello', 'hello', 'world']

        second_batch = yield read(comm)
        assert second_batch == ['HELLO', 'HELLO']
def test_BatchedSend():
    """Messages sent within one flush interval arrive as a single batch."""
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        b.start(stream)
        yield b.last_send
        yield gen.sleep(0.020)

        b.send('hello')
        b.send('hello')
        b.send('world')
        yield gen.sleep(0.020)
        b.send('HELLO')
        b.send('HELLO')

        result = yield read(stream)
        assert result == ['hello', 'hello', 'world']
        result = yield read(stream)
        assert result == ['HELLO', 'HELLO']
def test_BatchedStream():
    """Round-trip two messages through a BatchedStream; each is received
    back twice, in order."""
    port = 3434
    server = MyServer()
    server.listen(port)

    client = TCPClient()
    stream = yield client.connect('127.0.0.1', port)

    b = BatchedStream(stream, interval=20)
    b.send('hello')
    b.send('world')

    result = yield b.recv()
    assert result == 'hello'
    result = yield b.recv()
    assert result == 'hello'
    result = yield b.recv()
    assert result == 'world'
    result = yield b.recv()
    assert result == 'world'

    b.close()
class HTTPAdapter(BaseAdapter):
    """The built-in HTTP Adapter for BaseIOStream.

    Provides a general-case interface for trip sessions to contact HTTP urls
    by implementing the Transport Adapter interface. This class will usually
    be created by the :class:`Session <Session>` class under the covers.

    :param max_retries: The maximum number of retries each connection
        should attempt. Note, this applies only to failed DNS lookups,
        socket connections and connection timeouts, never to requests where
        data has made it to the server. By default, Requests does not retry
        failed connections.

    Usage::

      >>> import trip
      >>> s = trip.Session()
      >>> a = trip.adapters.HTTPAdapter(hostname_mapping='/etc/hosts')
      >>> s.mount('http://', a)
    """

    def __init__(self, io_loop=None, hostname_mapping=None,
                 max_buffer_size=104857600,
                 max_header_size=None, max_body_size=None):
        super(HTTPAdapter, self).__init__()
        self.max_buffer_size = max_buffer_size
        self.max_header_size = max_header_size
        self.max_body_size = max_body_size
        self.io_loop = io_loop or IOLoop.current()
        self.resolver = Resolver()
        if hostname_mapping is not None:
            self.resolver = OverrideResolver(resolver=self.resolver,
                                             mapping=hostname_mapping)
        self.tcp_client = TCPClient(resolver=self.resolver)

    @gen.coroutine
    def send(self, request, stream=False, timeout=None, verify=True,
             cert=None, proxies=None):
        """Sends Request object. Returns Response object.

        :param request: The :class:`PreparedRequest <PreparedRequest>`
            being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a
            :ref:`(connect timeout, read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param verify: (optional) Whether to verify SSL certificates.
        :param cert: (optional) Any user-provided SSL certificate to be
            trusted.
        :param proxies: (optional) The proxies dictionary to apply to the
            request.
        :rtype: trip.adapters.MessageDelegate
        """
        if isinstance(timeout, tuple):
            try:
                connect_timeout, read_timeout = timeout
            except ValueError:
                # this may raise a string formatting error.
                err = ("Invalid timeout {0}. Pass a (connect, read) "
                       "timeout tuple, or a single float to set "
                       "both timeouts to the same value".format(timeout))
                raise ValueError(err)
        else:
            connect_timeout, read_timeout = timeout, timeout

        # timeout_reason doubles as a state flag: while 'reason' is set the
        # deadline has not fired; _on_timeout replaces it with 'error'.
        timeout_reason = {}
        if connect_timeout:
            timeout_reason['reason'] = 'while connecting'
            self.io_loop.add_timeout(
                self.io_loop.time() + connect_timeout,
                stack_context.wrap(
                    functools.partial(self._on_timeout, timeout_reason)))
        s = yield self.tcp_client.connect(
            request.host, request.port,
            af=request.af,
            ssl_options=self._get_ssl_options(request, verify, cert),
            max_buffer_size=self.max_buffer_size)

        if not timeout_reason or timeout_reason.get('reason'):
            s.set_nodelay(True)
            timeout_reason.clear()
        else:
            raise gen.Return(Timeout(
                timeout_reason.get('error', 'unknown'),
                request=request))

        connection = HTTPConnection(
            s,
            HTTP1ConnectionParameters(
                no_keep_alive=True,
                max_header_size=self.max_header_size,
                max_body_size=self.max_body_size,
                decompress=request.decompress))

        if read_timeout:
            timeout_reason['reason'] = 'during request'
            # BUG FIX: the read deadline was previously scheduled with
            # connect_timeout; it must use read_timeout.
            self.io_loop.add_timeout(
                self.io_loop.time() + read_timeout,
                stack_context.wrap(
                    functools.partial(self._on_timeout, timeout_reason)))

        connection.write_headers(request.start_line, request.headers)
        if request.body is not None:
            connection.write(request.body)  # TODO: partial sending
        connection.finish()

        future = Future()

        def handle_response(response):
            if isinstance(response, Exception):
                future.set_exception(response)
            else:
                future.set_result(response)

        resp = MessageDelegate(request, connection, handle_response, stream)

        headers_received = yield connection.read_headers(resp)

        if not stream and headers_received:
            yield connection.read_body(resp)

        if not timeout_reason or timeout_reason.get('reason'):
            timeout_reason.clear()
            resp = yield future
            raise gen.Return(resp)
        else:
            raise gen.Return(Timeout(
                timeout_reason.get('error', 'unknown'),
                request=request))

    def _get_ssl_options(self, req, verify, cert):
        """Build the ssl_options dict for an https request, or None for
        plain http."""
        if urlsplit(req.url).scheme == "https":
            # If we are using the defaults, don't construct a new SSLContext.
            if req.ssl_options is not None:
                return req.ssl_options

            # deal with verify & cert
            ssl_options = {}
            if verify:
                cert_loc = None
                # Allow self-specified cert location.
                if verify is not True:
                    cert_loc = verify
                if not cert_loc:
                    cert_loc = DEFAULT_CA_BUNDLE_PATH
                if not cert_loc or not os.path.exists(cert_loc):
                    raise IOError(
                        "Could not find a suitable TLS CA certificate bundle, "
                        "invalid path: {0}".format(cert_loc))
                # you may change this to avoid server's certificate check
                ssl_options["cert_reqs"] = 2  # ssl.CERT_REQUIRED
                ssl_options["ca_certs"] = cert_loc

            if cert:
                if not isinstance(cert, basestring):
                    cert_file = cert[0]
                    key_file = cert[1]
                else:
                    cert_file = cert
                    key_file = None
                # BUG FIX: the original error messages formatted
                # `conn.cert_file` / `conn.key_file`, but no `conn` exists
                # in this scope (NameError on the error path); use the
                # local variables instead.
                if cert_file and not os.path.exists(cert_file):
                    raise IOError("Could not find the TLS certificate file, "
                                  "invalid path: {0}".format(cert_file))
                if key_file and not os.path.exists(key_file):
                    raise IOError("Could not find the TLS key file, "
                                  "invalid path: {0}".format(key_file))
                if key_file is not None:
                    ssl_options["keyfile"] = key_file
                if cert_file is not None:
                    ssl_options["certfile"] = cert_file

            # SSL interoperability is tricky.  We want to disable
            # SSLv2 for security reasons; it wasn't disabled by default
            # until openssl 1.0.  The best way to do this is to use
            # the SSL_OP_NO_SSLv2, but that wasn't exposed to python
            # until 3.2.  Python 2.7 adds the ciphers argument, which
            # can also be used to disable SSLv2.  As a last resort
            # on python 2.6, we set ssl_version to TLSv1.  This is
            # more narrow than we'd like since it also breaks
            # compatibility with servers configured for SSLv3 only,
            # but nearly all servers support both SSLv3 and TLSv1:
            # http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html
            if (2, 7) <= sys.version_info:
                # In addition to disabling SSLv2, we also exclude certain
                # classes of insecure ciphers.
                ssl_options["ciphers"] = "DEFAULT:!SSLv2:!EXPORT:!DES"
            else:
                # This is really only necessary for pre-1.0 versions
                # of openssl, but python 2.6 doesn't expose version
                # information.
                ssl_options["ssl_version"] = 3  # ssl.PROTOCOL_TLSv1
            return ssl_options
        return None

    def _on_timeout(self, info=None):
        """Timeout callback. Raise a timeout HTTPError when a timeout occurs.

        :info string key: More detailed timeout information.
        """
        if info:
            reason = info.get('reason', 'unknown')
            info.clear()
            info['error'] = 'Timeout {0}'.format(reason)

    def close(self):
        """Cleans up adapter specific items."""
        pass
class TCPClientTest(AsyncTestCase):
    """Integration tests for TCPClient.connect across IPv4/IPv6/unspec
    address families (callback-style tornado API)."""

    def setUp(self):
        super(TCPClientTest, self).setUp()
        self.server = None
        self.client = TCPClient()

    def start_server(self, family):
        # Start a fresh TestTCPServer and return its listening port.
        self.server = TestTCPServer(family)
        return self.server.port

    def stop_server(self):
        if self.server is not None:
            self.server.stop()
            self.server = None

    def tearDown(self):
        self.client.close()
        self.stop_server()
        super(TCPClientTest, self).tearDown()

    def skipIfLocalhostV4(self):
        # Skip the test when 'localhost' does not resolve to an IPv6 address.
        Resolver().resolve('localhost', 0, callback=self.stop)
        addrinfo = self.wait()
        families = set(addr[0] for addr in addrinfo)
        if socket.AF_INET6 not in families:
            self.skipTest("localhost does not resolve to ipv6")

    @gen_test
    def do_test_connect(self, family, host):
        # Shared body: connect, write 5 bytes, assert the server got them.
        port = self.start_server(family)
        stream = yield self.client.connect(host, port)
        with closing(stream):
            stream.write(b"hello")
            data = yield self.server.streams[0].read_bytes(5)
            self.assertEqual(data, b"hello")

    def test_connect_ipv4_ipv4(self):
        self.do_test_connect(socket.AF_INET, '127.0.0.1')

    def test_connect_ipv4_dual(self):
        self.do_test_connect(socket.AF_INET, 'localhost')

    @skipIfNoIPv6
    def test_connect_ipv6_ipv6(self):
        self.skipIfLocalhostV4()
        self.do_test_connect(socket.AF_INET6, '::1')

    @skipIfNoIPv6
    def test_connect_ipv6_dual(self):
        self.skipIfLocalhostV4()
        if Resolver.configured_class().__name__.endswith('TwistedResolver'):
            self.skipTest('TwistedResolver does not support multiple addresses')
        self.do_test_connect(socket.AF_INET6, 'localhost')

    def test_connect_unspec_ipv4(self):
        self.do_test_connect(socket.AF_UNSPEC, '127.0.0.1')

    @skipIfNoIPv6
    def test_connect_unspec_ipv6(self):
        self.skipIfLocalhostV4()
        self.do_test_connect(socket.AF_UNSPEC, '::1')

    def test_connect_unspec_dual(self):
        self.do_test_connect(socket.AF_UNSPEC, 'localhost')

    @gen_test
    def test_refused_ipv4(self):
        # Bind then close a port so connecting to it is refused.
        sock, port = bind_unused_port()
        sock.close()
        with self.assertRaises(IOError):
            yield self.client.connect('127.0.0.1', port)
class TCPClientTest(AsyncTestCase):
    """Integration tests for TCPClient.connect (modern tornado API):
    address families, source IP/port binding, and connect timeouts."""

    def setUp(self):
        super().setUp()
        self.server = None
        self.client = TCPClient()

    def start_server(self, family):
        if family == socket.AF_UNSPEC and "TRAVIS" in os.environ:
            self.skipTest("dual-stack servers often have port conflicts on travis")
        self.server = TestTCPServer(family)
        return self.server.port

    def stop_server(self):
        if self.server is not None:
            self.server.stop()
            self.server = None

    def tearDown(self):
        self.client.close()
        self.stop_server()
        super().tearDown()

    def skipIfLocalhostV4(self):
        # The port used here doesn't matter, but some systems require it
        # to be non-zero if we do not also pass AI_PASSIVE.
        addrinfo = self.io_loop.run_sync(
            lambda: Resolver().resolve("localhost", 80))
        families = set(addr[0] for addr in addrinfo)
        if socket.AF_INET6 not in families:
            self.skipTest("localhost does not resolve to ipv6")

    @gen_test
    def do_test_connect(self, family, host, source_ip=None, source_port=None):
        # Shared body: connect (optionally binding the source address),
        # write 5 bytes, and assert the server received them.
        port = self.start_server(family)
        stream = yield self.client.connect(
            host, port, source_ip=source_ip, source_port=source_port
        )
        assert self.server is not None
        server_stream = yield self.server.queue.get()
        with closing(stream):
            stream.write(b"hello")
            data = yield server_stream.read_bytes(5)
            self.assertEqual(data, b"hello")

    def test_connect_ipv4_ipv4(self):
        self.do_test_connect(socket.AF_INET, "127.0.0.1")

    def test_connect_ipv4_dual(self):
        self.do_test_connect(socket.AF_INET, "localhost")

    @skipIfNoIPv6
    def test_connect_ipv6_ipv6(self):
        self.skipIfLocalhostV4()
        self.do_test_connect(socket.AF_INET6, "::1")

    @skipIfNoIPv6
    def test_connect_ipv6_dual(self):
        self.skipIfLocalhostV4()
        if Resolver.configured_class().__name__.endswith("TwistedResolver"):
            self.skipTest("TwistedResolver does not support multiple addresses")
        self.do_test_connect(socket.AF_INET6, "localhost")

    def test_connect_unspec_ipv4(self):
        self.do_test_connect(socket.AF_UNSPEC, "127.0.0.1")

    @skipIfNoIPv6
    def test_connect_unspec_ipv6(self):
        self.skipIfLocalhostV4()
        self.do_test_connect(socket.AF_UNSPEC, "::1")

    def test_connect_unspec_dual(self):
        self.do_test_connect(socket.AF_UNSPEC, "localhost")

    @gen_test
    def test_refused_ipv4(self):
        cleanup_func, port = refusing_port()
        self.addCleanup(cleanup_func)
        with self.assertRaises(IOError):
            yield self.client.connect("127.0.0.1", port)

    def test_source_ip_fail(self):
        """Fail when trying to use the source IP Address '8.8.8.8'."""
        self.assertRaises(
            socket.error,
            self.do_test_connect,
            socket.AF_INET,
            "127.0.0.1",
            source_ip="8.8.8.8",
        )

    def test_source_ip_success(self):
        """Success when trying to use the source IP Address '127.0.0.1'."""
        self.do_test_connect(socket.AF_INET, "127.0.0.1", source_ip="127.0.0.1")

    @skipIfNonUnix
    def test_source_port_fail(self):
        """Fail when trying to use source port 1."""
        if getpass.getuser() == "root":
            # Root can use any port so we can't easily force this to fail.
            # This is mainly relevant for docker.
            self.skipTest("running as root")
        self.assertRaises(
            socket.error,
            self.do_test_connect,
            socket.AF_INET,
            "127.0.0.1",
            source_port=1,
        )

    @gen_test
    def test_connect_timeout(self):
        timeout = 0.05

        class TimeoutResolver(Resolver):
            def resolve(self, *args, **kwargs):
                return Future()  # never completes

        with self.assertRaises(TimeoutError):
            yield TCPClient(resolver=TimeoutResolver()).connect(
                "1.2.3.4", 12345, timeout=timeout
            )
class TCPClientTest(AsyncTestCase):
    """Exercise ``TCPClient.connect`` across address families.

    Older variant of this suite: uses the callback/``self.wait()`` test
    style and ``bind_unused_port`` for the refused-connection case.
    """

    def setUp(self):
        super(TCPClientTest, self).setUp()
        self.server = None
        self.client = TCPClient()

    def start_server(self, family):
        """Start a ``TestTCPServer`` for *family* and return its port."""
        if family == socket.AF_UNSPEC and 'TRAVIS' in os.environ:
            self.skipTest("dual-stack servers often have port conflicts on travis")
        self.server = TestTCPServer(family)
        return self.server.port

    def stop_server(self):
        """Stop the server started by ``start_server``, if any."""
        if self.server is not None:
            self.server.stop()
            self.server = None

    def tearDown(self):
        self.client.close()
        self.stop_server()
        super(TCPClientTest, self).tearDown()

    def skipIfLocalhostV4(self):
        """Skip the current test unless ``localhost`` resolves to IPv6."""
        # The port used here doesn't matter, but some systems require it
        # to be non-zero if we do not also pass AI_PASSIVE.
        Resolver().resolve('localhost', 80, callback=self.stop)
        addrinfo = self.wait()
        families = set(addr[0] for addr in addrinfo)
        if socket.AF_INET6 not in families:
            self.skipTest("localhost does not resolve to ipv6")

    @gen_test
    def do_test_connect(self, family, host):
        """Connect to a fresh server and verify a round-trip of b"hello"."""
        port = self.start_server(family)
        stream = yield self.client.connect(host, port)
        with closing(stream):
            stream.write(b"hello")
            data = yield self.server.streams[0].read_bytes(5)
            self.assertEqual(data, b"hello")

    def test_connect_ipv4_ipv4(self):
        self.do_test_connect(socket.AF_INET, '127.0.0.1')

    def test_connect_ipv4_dual(self):
        self.do_test_connect(socket.AF_INET, 'localhost')

    @skipIfNoIPv6
    def test_connect_ipv6_ipv6(self):
        self.skipIfLocalhostV4()
        self.do_test_connect(socket.AF_INET6, '::1')

    @skipIfNoIPv6
    def test_connect_ipv6_dual(self):
        self.skipIfLocalhostV4()
        if Resolver.configured_class().__name__.endswith('TwistedResolver'):
            self.skipTest('TwistedResolver does not support multiple addresses')
        self.do_test_connect(socket.AF_INET6, 'localhost')

    def test_connect_unspec_ipv4(self):
        self.do_test_connect(socket.AF_UNSPEC, '127.0.0.1')

    @skipIfNoIPv6
    def test_connect_unspec_ipv6(self):
        self.skipIfLocalhostV4()
        self.do_test_connect(socket.AF_UNSPEC, '::1')

    def test_connect_unspec_dual(self):
        self.do_test_connect(socket.AF_UNSPEC, 'localhost')

    @gen_test
    def test_refused_ipv4(self):
        # Grab a free port, then close the socket so nothing is listening:
        # connecting to it must raise.
        # NOTE(review): this is racy — another process may claim the port
        # between close() and connect(); newer suites use refusing_port().
        sock, port = bind_unused_port()
        sock.close()
        with self.assertRaises(IOError):
            yield self.client.connect('127.0.0.1', port)
class Client(object):
    """Envisalink TPI client.

    Maintains a single TCP connection to the Envisalink module, reads
    CRLF-terminated TPI messages in a read/handle loop, verifies their
    checksums, and dispatches them to ``handle_*`` methods.  Outbound
    requests arrive via the ``events`` bus ('alarm_update') and raw
    passthrough writes via 'envisalink'.
    """

    def __init__(self):
        logger.debug('Starting Envisalink Client')

        # Register events for alarmserver requests -> envisalink
        events.register('alarm_update', self.request_action)
        # Register events for envisalink proxy
        events.register('envisalink', self.envisalink_proxy)

        # Create TCP Client
        self.tcpclient = TCPClient()
        # Connection
        self._connection = None
        # Set our terminator to \r\n
        self._terminator = b"\r\n"
        # Reconnect delay
        self._retrydelay = 10

        # Kick off the (coroutine) connect loop; it runs until first yield.
        self.do_connect()

    @gen.coroutine
    def do_connect(self, reconnect = False):
        """Connect (or reconnect) to the Envisalink and start the read loop.

        Retries forever with a fixed delay while the connect itself fails;
        once connected, reads the first line and hands off to
        ``handle_line``, which keeps the loop going.
        """
        # Create the socket and connect to the server
        if reconnect == True:
            logger.warning('Connection failed, retrying in '+str(self._retrydelay)+ ' seconds')
            yield gen.sleep(self._retrydelay)

        while self._connection == None:
            logger.debug('Connecting to {}:{}'.format(config.ENVISALINKHOST, config.ENVISALINKPORT))
            try:
                self._connection = yield self.tcpclient.connect(config.ENVISALINKHOST, config.ENVISALINKPORT)
                self._connection.set_close_callback(self.handle_close)
            except StreamClosedError:
                #failed to connect, but got no connection object so we will loop here
                logger.warning('Connection failed, retrying in '+str(self._retrydelay)+ ' seconds')
                yield gen.sleep(self._retrydelay)
                continue

        try:
            line = yield self._connection.read_until(self._terminator)
        except StreamClosedError:
            #in this state, since the connection object isnt none, its going to throw the callback for handle_close so we just bomb out.
            #and let handle_close deal with this
            return

        logger.debug("Connected to %s:%i" % (config.ENVISALINKHOST, config.ENVISALINKPORT))
        self.handle_line(line)

    @gen.coroutine
    def handle_close(self):
        """Stream close callback: drop the connection and reconnect."""
        self._connection = None
        #logger.info("Disconnected from %s:%i" % (config.ENVISALINKHOST, config.ENVISALINKPORT))
        self.do_connect(True)

    @gen.coroutine
    def send_command(self, code, data, checksum = True):
        """Write a TPI command: code + data [+ checksum] + CRLF."""
        if checksum == True:
            to_send = code+data+get_checksum(code,data)+'\r\n'
        else:
            to_send = code+data+'\r\n'

        try:
            res = yield self._connection.write(to_send)
            logger.debug('TX > '+to_send[:-1])
        except StreamClosedError:
            #we don't need to handle this, the callback has been set for closed connections.
            pass

    @gen.coroutine
    def handle_line(self, rawinput):
        """Parse one TPI line, dispatch it, then schedule the next read.

        Wire format (after an optional ``HH:MM:SS `` timestamp prefix):
        3-digit code, parameters, 2 hex checksum chars.
        """
        if rawinput == '':
            return
        input = rawinput.strip()
        if config.ENVISALINKLOGRAW == True:
            logger.debug('RX RAW < "' + str(input) + '"')
        # Strip a leading "HH:MM:SS " timestamp if the EVL prepends one.
        if re.match(r'^\d\d:\d\d:\d\d ',input):
            evltime = input[:8]
            input = input[9:]
        if not re.match(r'^[0-9a-fA-F]{5,}$', input):
            logger.warning('Received invalid TPI message: ' + repr(rawinput));
            return
        code = int(input[:3])
        parameters = input[3:][:-2]
        try:
            event = getMessageType(int(code))
        except KeyError:
            logger.warning('Received unknown TPI code: "%s", parameters: "%s"' % (input[:3], parameters))
            return
        # Verify the trailing 2-hex-digit checksum against our own.
        rcksum = int(input[-2:], 16)
        ccksum = int(get_checksum(input[:3],parameters), 16)
        if rcksum != ccksum:
            logger.warning('Received invalid TPI checksum %02X vs %02X: "%s"' % (rcksum, ccksum, input))
            return
        message = self.format_event(event, parameters)
        logger.debug('RX < ' +str(code)+' - '+message)
        # Pick a handler by the event's 'handler' key, defaulting to the
        # generic handle_event; everything except login is also proxied.
        try:
            handler = "handle_%s" % event['handler']
        except KeyError:
            handler = "handle_event"
        try:
            func = getattr(self, handler)
            if handler != 'handle_login':
                events.put('proxy', None, rawinput)
        except AttributeError:
            raise CodeError("Handler function doesn't exist")
        func(code, parameters, event, message)
        # Chain the next read so the receive loop keeps running.
        try:
            line = yield self._connection.read_until(self._terminator)
            self.handle_line(line)
        except StreamClosedError:
            #we don't need to handle this, the callback has been set for closed connections.
            pass

    def format_event(self, event, parameters):
        """Render a human-readable message for *event* using *parameters*.

        Substitutes friendly partition/zone/user names from config where
        available; falls back to formatting with the raw parameters.
        """
        if 'type' in event:
            if event['type'] in ('partition', 'zone'):
                if event['type'] == 'partition':
                    # If parameters includes extra digits then this next line would fail
                    # without looking at just the first digit which is the partition number
                    if int(parameters[0]) in config.PARTITIONNAMES:
                        # After partition number can be either a usercode
                        # or for event 652 a type of arm mode (single digit)
                        # Usercode is always 4 digits padded with zeros
                        if len(str(parameters)) == 5:
                            # We have a usercode
                            # NOTE(review): bare except below swallows everything,
                            # not just ValueError — confirm this is intentional.
                            try:
                                usercode = int(parameters[1:5])
                            except:
                                usercode = 0
                            if int(usercode) in config.ALARMUSERNAMES:
                                alarmusername = config.ALARMUSERNAMES[int(usercode)]
                            else:
                                # Didn't find a username, use the code instead
                                alarmusername = usercode
                            return event['name'].format(str(config.PARTITIONNAMES[int(parameters[0])]), str(alarmusername))
                        elif len(parameters) == 2:
                            # We have an arm mode instead, get it's friendly name
                            armmode = evl_ArmModes[int(parameters[1])]
                            return event['name'].format(str(config.PARTITIONNAMES[int(parameters[0])]), str(armmode))
                        else:
                            return event['name'].format(str(config.PARTITIONNAMES[int(parameters)]))
                elif event['type'] == 'zone':
                    if int(parameters) in config.ZONENAMES:
                        if config.ZONENAMES[int(parameters)]!=False:
                            return event['name'].format(str(config.ZONENAMES[int(parameters)]))
        # Fallback: no friendly name available, format with raw parameters.
        return event['name'].format(str(parameters))

    #envisalink event handlers, some events are unhandeled.
    def handle_login(self, code, parameters, event, message):
        """React to login prompts: '3' send password, '1' success, '0' bad password."""
        if parameters == '3':
            self.send_command('005', config.ENVISALINKPASS)
        if parameters == '1':
            self.send_command('001', '')
        if parameters == '0':
            logger.warning('Incorrect envisalink password')
            sys.exit(0)

    def handle_event(self, code, parameters, event, message):
        """Generic handler: forward named zone/partition events to the bus."""
        # only handle events with a 'type' defined
        if not 'type' in event:
            return
        parameters = int(parameters)
        try:
            defaultStatus = evl_Defaults[event['type']]
        except IndexError:
            defaultStatus = {}
        if (event['type'] == 'zone' and parameters in config.ZONENAMES) or (event['type'] == 'partition' and parameters in config.PARTITIONNAMES):
            events.put('alarm', event['type'], parameters, code, event, message, defaultStatus)
        elif (event['type'] == 'zone' or event['type'] == 'partition'):
            logger.debug('Ignoring unnamed %s %s' % (event['type'], parameters))
        else:
            logger.debug('Ignoring unhandled event %s' % event['type'])

    def handle_zone(self, code, parameters, event, message):
        # Zone events carry a leading partition digit; strip it.
        self.handle_event(code, parameters[1:], event, message)

    def handle_partition(self, code, parameters, event, message):
        # Partition events: only the first digit is the partition number.
        self.handle_event(code, parameters[0], event, message)

    def request_action(self, eventType, type, parameters):
        """Translate an 'alarm_update' bus request into a TPI command."""
        partition = str(parameters['partition'])
        if type == 'arm':
            self.send_command('030', partition)
        elif type == 'stayarm':
            self.send_command('031', partition)
        elif type == 'armwithcode':
            self.send_command('033', partition + str(parameters['alarmcode']))
        elif type == 'disarm':
            if 'alarmcode' in parameters:
                self.send_command('040', partition + str(parameters['alarmcode']))
            else:
                self.send_command('040', partition + str(config.ALARMCODE))
        elif type == 'refresh':
            self.send_command('001', '')
        elif type == 'pgm':
            # NOTE(review): this branch only builds a local dict and sends
            # nothing — looks unfinished; confirm intended behavior.
            response = {'response' : 'Request to trigger PGM'}

    @gen.coroutine
    def envisalink_proxy(self, eventType, type, parameters, *args):
        """Write a raw passthrough payload straight to the Envisalink."""
        try:
            res = yield self._connection.write(parameters)
            logger.debug('PROXY > '+parameters.strip())
        except StreamClosedError:
            #we don't need to handle this, the callback has been set for closed connections.
            pass
class TCPClientTest(AsyncTestCase):
    """Exercise ``TCPClient.connect`` across address families.

    Intermediate variant of this suite: callback/``self.wait()`` resolver
    style, but already covering source address/port binding, a queue-based
    ``TestTCPServer``, ``refusing_port`` and connect timeouts.
    """

    def setUp(self):
        super(TCPClientTest, self).setUp()
        self.server = None
        self.client = TCPClient()

    def start_server(self, family):
        """Start a ``TestTCPServer`` for *family* and return its port."""
        if family == socket.AF_UNSPEC and 'TRAVIS' in os.environ:
            self.skipTest(
                "dual-stack servers often have port conflicts on travis")
        self.server = TestTCPServer(family)
        return self.server.port

    def stop_server(self):
        """Stop the server started by ``start_server``, if any."""
        if self.server is not None:
            self.server.stop()
            self.server = None

    def tearDown(self):
        self.client.close()
        self.stop_server()
        super(TCPClientTest, self).tearDown()

    def skipIfLocalhostV4(self):
        """Skip the current test unless ``localhost`` resolves to IPv6."""
        # The port used here doesn't matter, but some systems require it
        # to be non-zero if we do not also pass AI_PASSIVE.
        Resolver().resolve('localhost', 80, callback=self.stop)
        addrinfo = self.wait()
        families = set(addr[0] for addr in addrinfo)
        if socket.AF_INET6 not in families:
            self.skipTest("localhost does not resolve to ipv6")

    @gen_test
    def do_test_connect(self, family, host, source_ip=None, source_port=None):
        """Connect to a fresh server and verify a round-trip of b"hello"."""
        port = self.start_server(family)
        stream = yield self.client.connect(host, port,
                                           source_ip=source_ip,
                                           source_port=source_port)
        server_stream = yield self.server.queue.get()
        with closing(stream):
            stream.write(b"hello")
            data = yield server_stream.read_bytes(5)
            self.assertEqual(data, b"hello")

    def test_connect_ipv4_ipv4(self):
        self.do_test_connect(socket.AF_INET, '127.0.0.1')

    def test_connect_ipv4_dual(self):
        self.do_test_connect(socket.AF_INET, 'localhost')

    @skipIfNoIPv6
    def test_connect_ipv6_ipv6(self):
        self.skipIfLocalhostV4()
        self.do_test_connect(socket.AF_INET6, '::1')

    @skipIfNoIPv6
    def test_connect_ipv6_dual(self):
        self.skipIfLocalhostV4()
        if Resolver.configured_class().__name__.endswith('TwistedResolver'):
            self.skipTest(
                'TwistedResolver does not support multiple addresses')
        self.do_test_connect(socket.AF_INET6, 'localhost')

    def test_connect_unspec_ipv4(self):
        self.do_test_connect(socket.AF_UNSPEC, '127.0.0.1')

    @skipIfNoIPv6
    def test_connect_unspec_ipv6(self):
        self.skipIfLocalhostV4()
        self.do_test_connect(socket.AF_UNSPEC, '::1')

    def test_connect_unspec_dual(self):
        self.do_test_connect(socket.AF_UNSPEC, 'localhost')

    @gen_test
    def test_refused_ipv4(self):
        # refusing_port() yields a port guaranteed to refuse connections
        # plus a cleanup callable that releases the underlying socket.
        cleanup_func, port = refusing_port()
        self.addCleanup(cleanup_func)
        with self.assertRaises(IOError):
            yield self.client.connect('127.0.0.1', port)

    def test_source_ip_fail(self):
        '''
        Fail when trying to use the source IP Address '8.8.8.8'.
        '''
        self.assertRaises(socket.error,
                          self.do_test_connect,
                          socket.AF_INET,
                          '127.0.0.1',
                          source_ip='8.8.8.8')

    def test_source_ip_success(self):
        '''
        Success when trying to use the source IP Address '127.0.0.1'
        '''
        self.do_test_connect(socket.AF_INET, '127.0.0.1', source_ip='127.0.0.1')

    @skipIfNonUnix
    def test_source_port_fail(self):
        '''
        Fail when trying to use source port 1.
        '''
        self.assertRaises(socket.error,
                          self.do_test_connect,
                          socket.AF_INET,
                          '127.0.0.1',
                          source_port=1)

    @gen_test
    def test_connect_timeout(self):
        """A resolver that never completes must trip the connect timeout."""
        timeout = 0.05

        class TimeoutResolver(Resolver):
            def resolve(self, *args, **kwargs):
                return Future()  # never completes

        with self.assertRaises(TimeoutError):
            yield TCPClient(resolver=TimeoutResolver()).connect(
                '1.2.3.4', 12345, timeout=timeout)
def main():
    """Connect using the ``connect`` option group and run the Application.

    NOTE(review): this function contains ``yield`` but no coroutine
    decorator is visible, so calling ``main()`` merely creates a generator
    and none of the body executes on its own — confirm the caller drives it
    (e.g. via ``IOLoop.run_sync``) or that a decorator exists outside this
    chunk.
    """
    factory = TCPClient()
    # Expand all options registered in the "connect" group (host, port, ...)
    # as keyword arguments to TCPClient.connect.
    stream = yield factory.connect(**options.options.group_dict("connect"))
    app = Application(stream)
    app.run()
class Client(object):
    """Envisalink TPI client with keepalive support.

    Like the basic client, but additionally tracks the time of the last
    received line and periodically emits a 'ping' request when the link
    has been idle longer than ``config.ENVISALINKKEEPALIVE`` seconds.
    Also distinguishes DNS failures (``gaierror``) from plain connect
    failures so a bad hostname aborts startup instead of retrying forever.
    """

    def __init__(self):
        debug('Starting Envisalink Client')
        # Register events for alarmserver requests -> envisalink
        events.register('alarm_update', self.request_action)
        # Register events for envisalink proxy
        events.register('envisalink', self.envisalink_proxy)
        # Create TCP Client
        self.tcpclient = TCPClient()
        # Connection
        self._connection = None
        # Set our terminator to \r\n
        self._terminator = b"\r\n"
        # Reconnect delay
        self._retrydelay = 10
        # Connect to Envisalink
        self.do_connect()
        # Setup timer to refresh envisalink
        tornado.ioloop.PeriodicCallback(self.check_connection, 1000).start()
        # Last activity
        self._last_activity = time.time()

    def check_connection(self):
        """Once per second: request a keepalive ping if the link is idle."""
        if (self._last_activity + config.ENVISALINKKEEPALIVE) < time.time():
            events.put('alarm_update', 'ping')

    @gen.coroutine
    def do_connect(self, reconnect=False):
        """Connect (or reconnect) to the Envisalink and start the read loop.

        Connect failures retry forever with a fixed delay; an unresolvable
        hostname retries only on reconnect, otherwise exits the process.
        """
        # Create the socket and connect to the server
        if reconnect == True:
            warning('Connection failed, retrying in %s seconds' % str(self._retrydelay))
            yield gen.sleep(self._retrydelay)

        while self._connection == None:
            debug('Connecting to {}:{}'.format(config.ENVISALINKHOST, config.ENVISALINKPORT))
            try:
                self._connection = yield self.tcpclient.connect(
                    config.ENVISALINKHOST, config.ENVISALINKPORT)
                self._connection.set_close_callback(self.handle_close)
            except StreamClosedError:
                #failed to connect, but got no connection object so we will loop here
                warning('Connection failed, retrying in %s seconds' % str(self._retrydelay))
                yield gen.sleep(self._retrydelay)
                continue
            except gaierror:
                #could not resolve host provided, if this is a reconnect, will retry, if not, will fail
                if reconnect == True:
                    warning(
                        'Connection failed, unable to resolve hostname %s, retrying in %s seconds'
                        % (config.ENVISALINKHOST, str(self._retrydelay)))
                    yield gen.sleep(self._retrydelay)
                    continue
                else:
                    warning(
                        'Connection failed, unable to resolve hostname %s. Exiting due to incorrect hostname.'
                        % config.ENVISALINKHOST)
                    sys.exit(0)

        try:
            line = yield self._connection.read_until(self._terminator)
        except StreamClosedError:
            #in this state, since the connection object isnt none, its going to throw the callback for handle_close so we just bomb out.
            #and let handle_close deal with this
            return

        debug("Connected to %s:%i" % (config.ENVISALINKHOST, config.ENVISALINKPORT))
        self.handle_line(line)

    @gen.coroutine
    def handle_close(self):
        """Stream close callback: drop the connection and reconnect."""
        self._connection = None
        #info("Disconnected from %s:%i" % (config.ENVISALINKHOST, config.ENVISALINKPORT))
        self.do_connect(True)

    @gen.coroutine
    def send_command(self, code, data='', checksum=True):
        """Write a TPI command: code + data [+ checksum] + CRLF."""
        if checksum == True:
            to_send = code + data + get_checksum(code, data) + '\r\n'
        else:
            to_send = code + data + '\r\n'
        try:
            res = yield self._connection.write(to_send)
            debug('TX > ' + to_send[:-1])
        except StreamClosedError:
            #we don't need to handle this, the callback has been set for closed connections.
            pass

    @gen.coroutine
    def handle_line(self, rawinput):
        """Parse one TPI line, dispatch it, then schedule the next read.

        Also refreshes the keepalive idle timer.  Wire format (after an
        optional ``HH:MM:SS `` prefix): 3-digit code, parameters, 2 hex
        checksum chars.
        """
        self._last_activity = time.time()
        if rawinput == '':
            return
        input = rawinput.strip()
        if config.ENVISALINKLOGRAW == True:
            debug('RX RAW < "' + str(input) + '"')
        # Strip a leading "HH:MM:SS " timestamp if the EVL prepends one.
        if re.match(r'^\d\d:\d\d:\d\d ', input):
            evltime = input[:8]
            input = input[9:]
        if not re.match(r'^[0-9a-fA-F]{5,}$', input):
            warning('Received invalid TPI message: ' + repr(rawinput))
            return
        code = int(input[:3])
        parameters = input[3:][:-2]
        try:
            event = getMessageType(int(code))
        except KeyError:
            warning('Received unknown TPI code: "%s", parameters: "%s"' % (input[:3], parameters))
            return
        # Verify the trailing 2-hex-digit checksum against our own.
        rcksum = int(input[-2:], 16)
        ccksum = int(get_checksum(input[:3], parameters), 16)
        if rcksum != ccksum:
            warning('Received invalid TPI checksum %02X vs %02X: "%s"' % (rcksum, ccksum, input))
            return
        message = self.format_event(event, parameters)
        debug('RX < ' + str(code) + ' - ' + message)
        # Pick a handler by the event's 'handler' key, defaulting to the
        # generic handle_event; everything except login is also proxied.
        try:
            handler = "handle_%s" % event['handler']
        except KeyError:
            handler = "handle_event"
        try:
            func = getattr(self, handler)
            if handler != 'handle_login':
                events.put('proxy', None, rawinput)
        except AttributeError:
            raise CodeError("Handler function doesn't exist")
        func(code, parameters, event, message)
        # Chain the next read so the receive loop keeps running.
        try:
            line = yield self._connection.read_until(self._terminator)
            self.handle_line(line)
        except StreamClosedError:
            #we don't need to handle this, the callback has been set for closed connections.
            pass

    def format_event(self, event, parameters):
        """Render a human-readable message for *event* using *parameters*.

        Substitutes friendly partition/zone/user names from config where
        available; falls back to formatting with the raw parameters.
        """
        if 'type' in event:
            if event['type'] in ('partition', 'zone'):
                if event['type'] == 'partition':
                    # If parameters includes extra digits then this next line would fail
                    # without looking at just the first digit which is the partition number
                    if int(parameters[0]) in config.PARTITIONNAMES:
                        # After partition number can be either a usercode
                        # or for event 652 a type of arm mode (single digit)
                        # Usercode is always 4 digits padded with zeros
                        if len(str(parameters)) == 5:
                            # We have a usercode
                            # NOTE(review): bare except below swallows everything,
                            # not just ValueError — confirm this is intentional.
                            try:
                                usercode = int(parameters[1:5])
                            except:
                                usercode = 0
                            if int(usercode) in config.ALARMUSERNAMES:
                                alarmusername = config.ALARMUSERNAMES[int(
                                    usercode)]
                            else:
                                # Didn't find a username, use the code instead
                                alarmusername = usercode
                            return event['name'].format(
                                str(config.PARTITIONNAMES[int(parameters[0])]),
                                str(alarmusername))
                        elif len(parameters) == 2:
                            # We have an arm mode instead, get it's friendly name
                            armmode = evl_ArmModes[int(parameters[1])]
                            return event['name'].format(
                                str(config.PARTITIONNAMES[int(parameters[0])]),
                                str(armmode))
                        else:
                            return event['name'].format(
                                str(config.PARTITIONNAMES[int(parameters)]))
                elif event['type'] == 'zone':
                    if int(parameters) in config.ZONENAMES:
                        if config.ZONENAMES[int(parameters)] != False:
                            return event['name'].format(
                                str(config.ZONENAMES[int(parameters)]))
        # Fallback: no friendly name available, format with raw parameters.
        return event['name'].format(str(parameters))

    #envisalink event handlers, some events are unhandeled.
    def handle_login(self, code, parameters, event, message):
        """React to login prompts: '3' send password, '1' success, '0' bad password."""
        if parameters == '3':
            self.send_command('005', config.ENVISALINKPASS)
        if parameters == '1':
            self.send_command('001')
        if parameters == '0':
            warning('Incorrect envisalink password')
            sys.exit(0)

    def handle_event(self, code, parameters, event, message):
        """Generic handler: forward named zone/partition events to the bus."""
        # only handle events with a 'type' defined
        if not 'type' in event:
            return
        parameters = int(parameters)
        try:
            defaultStatus = evl_Defaults[event['type']]
        except IndexError:
            defaultStatus = {}
        if (event['type'] == 'zone' and parameters in config.ZONENAMES) or (
                event['type'] == 'partition' and parameters in config.PARTITIONNAMES):
            events.put('alarm', event['type'], parameters, code, event,
                       message, defaultStatus)
        elif (event['type'] == 'zone' or event['type'] == 'partition'):
            debug('Ignoring unnamed %s %s' % (event['type'], parameters))
        else:
            debug('Ignoring unhandled event %s' % event['type'])

    def handle_zone(self, code, parameters, event, message):
        # Zone events carry a leading partition digit; strip it.
        self.handle_event(code, parameters[1:], event, message)

    def handle_partition(self, code, parameters, event, message):
        # Partition events: only the first digit is the partition number.
        self.handle_event(code, parameters[0], event, message)

    def request_action(self, eventType, type, parameters):
        """Translate an 'alarm_update' bus request into a TPI command.

        ``parameters`` may be a plain string (e.g. 'ping' requests), in
        which case subscripting raises TypeError and partition is None.
        """
        try:
            partition = str(parameters['partition'])
        except TypeError:
            partition = None
        if type == 'arm':
            self.send_command('030', partition)
        elif type == 'stayarm':
            self.send_command('031', partition)
        elif type == 'armwithcode':
            self.send_command('033', partition + str(parameters['alarmcode']))
        elif type == 'disarm':
            if 'alarmcode' in parameters:
                self.send_command('040', partition + str(parameters['alarmcode']))
            else:
                self.send_command('040', partition + str(config.ALARMCODE))
        elif type == 'refresh':
            self.send_command('001')
        elif type == 'ping':
            self.send_command('000')
        elif type == 'pgm':
            # NOTE(review): this branch only builds a local dict and sends
            # nothing — looks unfinished; confirm intended behavior.
            response = {'response': 'Request to trigger PGM'}

    @gen.coroutine
    def envisalink_proxy(self, eventType, type, parameters, *args):
        """Write a raw passthrough payload straight to the Envisalink."""
        try:
            res = yield self._connection.write(parameters)
            debug('PROXY > ' + parameters.strip())
        except StreamClosedError:
            #we don't need to handle this, the callback has been set for closed connections.
            pass
class Emitter(object):
    """Load generator: periodically opens TCP connections to localhost and
    writes batches of messages.

    Sends at roughly ``n`` calls per second, ``values`` messages per call,
    for ``duration`` seconds, then stops the IOLoop.  The payload format is
    selected by assigning one of the ``hello``/``text``/``json``/``bello``/
    ``struct`` methods to ``self.message``.
    """

    def __init__(self, port, n=1000, values=1, duration=3.0):
        # target port on 127.0.0.1
        self.port = port
        # send-callback frequency (calls per second)
        self.n = n
        # messages written per callback
        self.values = values
        # total run time in seconds
        self.duration = duration
        # payload factory; defaults to the fixed b'hello\n' message
        self.message = self.hello
        # count of messages sent so far
        self.i = 0
        self.pcb = None
        self.client = None

    def start(self):
        """Run the emitter: blocks in the IOLoop until ``stop`` fires."""
        self.client = TCPClient()
        self.pcb = PeriodicCallback(self.send, 1000.0 / self.n)
        self.pcb.start()
        # Schedule shutdown slightly after the nominal duration.
        IOLoop.current().call_later(self.duration + 0.5, self.stop)
        IOLoop.current().start()
        IOLoop.clear_current()

    def stop(self):
        """Stop the periodic sender, close the client, halt the IOLoop."""
        if self.pcb is not None:
            self.pcb.stop()
        if self.client is not None:
            self.client.close()
        IOLoop.current().stop()

    @gen.coroutine
    def send(self):
        """One periodic tick: connect, write a batch, count it."""
        # Stop once the planned total message count has been reached.
        if self.i >= self.duration * self.n * self.values:
            self.pcb.stop()
            return
        try:
            stream = yield self.client.connect('127.0.0.1', self.port)
            with closing(stream):
                messages = b''.join(self.message() for _ in range(self.values))
                stream.write(messages)
                self.i += self.values
        except StreamClosedError:
            # Receiver went away; just skip this tick.
            return

    def hello(self):
        # fixed 6-byte line payload
        return b'hello\n'

    def r(self):
        """Return a (sensor_id, value) pair with pseudo-random value."""
        s = random.randint(1, 10)
        v = s / 10.0 + (1.5 - s / 10.0) * random.random()
        return (s, v)

    def text(self):
        # 'sensorN|value\n' line-oriented text payload
        return 'sensor{}|{}\n'.format(*self.r()).encode('utf8')

    def json(self):
        # one JSON object per line: {"sensorN": value}
        s, v = self.r()
        return (json.dumps({
            'sensor{}'.format(s): v,
        }) + '\n').encode('utf8')

    def bello(self):
        # 5 bytes
        return b'bello'

    def struct(self):
        # 8 bytes
        return struct.pack('If', *self.r())
class ClientWSConnection(websocket.WebSocketHandler):
    """WebSocket bridge between agent browsers and the ATG/CTI TCP service.

    Each websocket connection opens its own TCP link to the ATG server
    (``atg_connect``), forwards agent commands received over the websocket
    as semicolon-delimited CTI messages (``on_message``), and relays ATG
    responses back to all sockets in the same room as raw websocket frames
    (``make_frame``/``write_frame``).  Call/agent state transitions are
    logged into MySQL via the module-level ``conn``/``consmart`` helpers.
    """

    def initialize(self, room_handler):
        """Store a reference to the "external" RoomHandler instance"""
        self.__rh = room_handler
        self.atg_stream = None
        self.atg = TCPClient()
        # Start the ATG connect/read loop in the background.
        IOLoop.current().spawn_callback(self.atg_connect)

    def open(self):
        """Identify the agent from cookies and register with the room."""
        self.__clientID = self.get_cookie("ftc_cid")
        self.__agentID = self.get_cookie("ftc_agentid")
        self.__agentClient = self.get_cookie("cookie_id")
        self.__ext = self.get_cookie("ext_number")
        self.__vdn_group = self.get_cookie("vdn_group")
        self.__ip_address = self.get_cookie("ip_address")
        self.__today = datetime.now().replace(microsecond=0)
        self.__rh.add_client_wsconn(self.__clientID, self)
        logger.info("| WS_OPENED |> %s" % self.__clientID)

    def on_message(self, message):
        """Handle one JSON command from the browser.

        Looks up the agent's PABX credentials, then translates the
        ``payload`` command into the corresponding ATG message; the 'busy'
        command instead records agent/call state changes in the database.
        """
        msg = json.loads(message)
        mlen = len(msg['payload'])
        msg['username'] = self.__rh.client_info[self.__clientID]['nick']
        """Update After Status"""
        conn.update('m_user', 'muser_id = %s', msg['userid'],
                    cti_afterstatus=msg['after'])
        after = str(msg['after'])
        callno = msg['callno']
        busy = msg['busy']
        ringtone = msg['ring']
        # Fetch the agent's PABX credentials and VDN for command building.
        try:
            conn.query(
                "SELECT cti_agentpabx, cti_password, cti_extension, cti_afterstatus, cti_agent_group FROM m_user WHERE muser_id = %s",
                ([self.__agentClient]))
            for i in range(conn._db_cur.rowcount):
                row = conn._db_cur.fetchone()
                consmart.query(
                    "SELECT vdn FROM vdn_agent_group WHERE agent_group = %s",
                    ([row[4]]))
                row_smart = consmart._db_cur.fetchone()
                pabx_agent = row[0]
                pabx_pass = row[1]
                pabx_ext = row[2]
                pabx_afsta = row[3]
                pabx_vdn = row_smart[0]
        except MySQLdb.Error:
            # NOTE(review): `e` is not bound here (missing `as e`), so this
            # print would itself raise NameError — confirm and fix upstream.
            print("Error %d: %s" % (e.args[0], e.args[1]))
            sys.exit(1)
        varcommand = msg['payload']
        if varcommand == 'login':
            msg_do_login = self.__agentClient + ';do_user_login;' + \
                pabx_agent + ';' + pabx_pass + ';' + pabx_ext + ';' + pabx_vdn
            self.atg_stream.write((msg_do_login).encode())
            logger.info("| MSG-CTI |> Login device %s" % (msg_do_login))
            msg_do_run_device = pabx_ext + ';do_run_device'
            self.atg_stream.write((msg_do_run_device).encode())
            logger.info("| MSG-CTI |> Run device %s" % (msg_do_run_device))
        elif varcommand == 'retrieve':
            msg_retrieve = self.__agentClient + ';do_dev_retrieve_call'
            self.atg_stream.write((msg_retrieve).encode())
            logger.info("| MSG-CTI |> Retrieve Call %s" % (msg_retrieve))
        elif varcommand == 'answer':
            msg_answer = self.__agentClient + ';do_dev_answer_call'
            self.atg_stream.write((msg_answer).encode())
            logger.info("| MSG-CTI |> Answer Call %s" % (msg_answer))
        elif varcommand == 'hangup':
            msg_hangup = pabx_ext + ';do_dev_hangup_call'
            self.atg_stream.write((msg_hangup).encode())
            logger.info("| MSG-CTI |> Hangup Call %s" % (msg_hangup))
        elif varcommand == 'loginagent':
            msg_acd_login = self.__agentClient + ';do_ag_login;' + pabx_ext + \
                ';' + pabx_agent + ';' + pabx_pass + ';' + pabx_afsta
            self.atg_stream.write((msg_acd_login).encode())
            logger.info("| MSG-CTI |> ACD Login %s" % (msg_acd_login))
        elif varcommand == 'ready':
            msg_acd_ready = self.__agentClient + ';do_ag_ready;' + \
                pabx_ext + ';' + pabx_agent + ';' + pabx_pass + \
                ';' + pabx_afsta
            self.atg_stream.write((msg_acd_ready).encode())
            logger.info("| MSG-CTI |> Ready %s" % (msg_acd_ready))
        elif varcommand == 'notready':
            msg_acd_not_ready = self.__agentClient + ';do_ag_aux;' + \
                pabx_ext + ';' + pabx_agent + ';' + pabx_pass + \
                ';' + after
            self.atg_stream.write((msg_acd_not_ready).encode())
            logger.info("| MSG-CTI |> Not Ready %s" % (msg_acd_not_ready))
        elif varcommand == 'logout':
            msg_acd_shutdown = self.__agentClient + ';do_ag_logout;' + \
                pabx_ext + ';' + pabx_agent + ';' + pabx_pass + \
                ';' + pabx_afsta
            self.atg_stream.write((msg_acd_shutdown).encode())
            logger.info("| MSG-CTI |> Logout %s" % (msg_acd_shutdown))
            msg_do_shutdown = pabx_ext + ';do_user_shutdown'
            self.atg_stream.write((msg_do_shutdown).encode())
            logger.info("| MSG-CTI |> Shutdown %s" % (msg_do_shutdown))
        elif varcommand == 'makecall':
            msg_do_make_call = self.__agentClient + ';do_dev_make_call;' + callno
            self.atg_stream.write((msg_do_make_call).encode())
            logger.info("| MSG-CTI |> Make Call %s" % (msg_do_make_call))
        elif varcommand == 'busy':
            # State-tracking command: 'ring' carries the state name and
            # 'busy' carries the associated counter/duration/number.
            dt = datetime.now().replace(microsecond=0)
            if busy < 1:
                busy = 0
            consmart.query(
                "SELECT tot_acd_call, tot_abd_call FROM agent_activity where agent = %s",
                ([self.__agentID]))
            if consmart._db_cur.rowcount:
                rr = consmart._db_cur.fetchone()
            if ringtone == 'busy':
                consmart.query(
                    'UPDATE call_session SET last_status=status, status=3011 WHERE session_id = %s',
                    ([self.__agentID]))
                consmart.update('agent_activity', 'agent = %s',
                                self.__agentID, tot_busy_time=busy)
            elif ringtone == 'ring':
                consmart.update('agent_activity', 'agent = %s',
                                self.__agentID, tot_ring_time=busy)
                consmart.query(
                    'UPDATE call_session SET last_status=status, status=3004, direction = 1, \
                    agent_time=now(), agent_ring = now() - agent_ring_time \
                    WHERE session_id = %s', ([self.__clientID]))
            elif ringtone == 'talk':
                consmart.update('agent_activity', 'agent = %s',
                                self.__agentID, tot_talk_time=busy)
            elif ringtone == 'acd':
                acd_call = int(busy) + int(rr[0])
                consmart.update('agent_activity', 'agent = %s',
                                self.__agentID, tot_acd_call=acd_call)
            elif ringtone == 'abd':
                abd_call = int(busy) + int(rr[1])
                consmart.update('agent_activity', 'agent = %s',
                                self.__agentID, tot_abd_call=abd_call)
            elif ringtone == 'aux':
                logger.info("EVENTLOGGER>> Agent Not Ready")
                activity.dblog_AgentActivity(self.__agentID, 3,
                                             self.__vdn_group, dt,
                                             self.__ext, busy,
                                             self.__ip_address)
            #"""Start Call Session"""
            elif ringtone == 'offered':
                consmart.query(
                    'SELECT session_id FROM call_session WHERE session_id = %s',
                    ([self.__clientID]))
                if consmart._db_cur.rowcount < 1:
                    consmart.insert(
                        'call_session', session_id=self.__clientID,
                        direction=1, status=3002, start_time=dt,
                        a_number=busy, b_number=self.__ext, d_number=0,
                        agent_id=self.__agentID,
                        agent_group=self.__vdn_group,
                        agent_ring_time=datetime.now().replace(
                            microsecond=0),
                        agent_time=datetime.now().replace(microsecond=0),
                        agent_ext=self.__ext)
                consmart.query(
                    'UPDATE call_session SET last_status=status, status=3002, agent_id = %s, \
                    agent_group=%s, agent_ext=%s, agent_ring_time=now(), ivr_duration = now() - start_time \
                    WHERE session_id = %s',
                    (self.__agentID, self.__vdn_group, self.__ext,
                     [self.__clientID]))
            elif ringtone == 'hold':
                consmart.query(
                    'UPDATE call_session SET agent_hold = unix_timestamp(now()) WHERE session_id = %s',
                    ([self.__clientID]))
            elif ringtone == 'retrieve':
                consmart.query(
                    'UPDATE call_session SET agent_hold = unix_timestamp(now()) - agent_hold WHERE session_id = %s',
                    ([self.__clientID]))
            elif ringtone == 'disconnect':
                consmart.query(
                    "UPDATE call_session SET last_status=status, end_time=now(), status=3005, agent_end_time = now(), \
                    agent_talk = unix_timestamp(now()) - unix_timestamp(agent_time), \
                    ivr_duration = unix_timestamp(agent_ring_time) - unix_timestamp(start_time) WHERE session_id = %s",
                    ([self.__clientID]))
            elif ringtone == 'originated':
                consmart.query(
                    'UPDATE call_session SET last_status=status, status=3007, b_number=%s WHERE session_id = %s',
                    (busy, [self.__clientID]))
            elif ringtone == 'init':
                consmart.query(
                    'SELECT session_id FROM call_session WHERE session_id = %s',
                    ([self.__clientID]))
                if consmart._db_cur.rowcount < 1:
                    consmart.insert(
                        'call_session', session_id=self.__clientID,
                        direction=2, status=3009, start_time=dt,
                        a_number=self.__ext, agent_id=self.__agentID,
                        agent_group=self.__vdn_group,
                        agent_time=datetime.now().replace(microsecond=0),
                        agent_ext=self.__ext)

    def on_close(self):
        """Unregister this connection from the room handler."""
        cid = self.__clientID
        self.__rh.remove_client(self.__clientID)
        logger.info("| WS_CLOSED |> %s" % cid)

    def make_frame(self, message):
        """Build a raw, unmasked websocket text frame for *message*."""
        opcode = 0x1
        # we know that binary is false, so opcode is s1
        message = tornado.escape.utf8(message)
        assert isinstance(message, bytes_type)
        finbit = 0x80
        mask_bit = 0
        frame = struct.pack("B", finbit | opcode)
        l = len(message)
        # Payload-length encoding per RFC 6455: 7-bit, 16-bit, or 64-bit.
        if l < 126:
            frame += struct.pack("B", l | mask_bit)
        elif l <= 0xFFFF:
            frame += struct.pack("!BH", 126 | mask_bit, l)
        else:
            frame += struct.pack("!BQ", 127 | mask_bit, l)
        frame += message
        return frame

    def write_frame(self, frame):
        """Write a pre-built frame to this socket, ignoring closed streams."""
        try:
            #self._write_frame(True, opcode, message)
            self.stream.write(frame)
        except StreamClosedError:
            pass
            # self._abort()

    def allow_draft76(self):
        return True

    @gen.coroutine
    def atg_connect(self):
        """Background loop: keep a TCP link to the ATG server alive.

        Connects, then reads newline-terminated responses forever, relaying
        each to the room's websockets and logging recognised agent/call
        event codes.  On stream closure, waits 5s and reconnects.
        """
        while True:
            try:
                self.atg_stream = yield self.atg.connect(
                    config.atghost, config.atgport)
                logger.info("Spawn ATG Connected at port %d", config.atgport)
                # Set TCP_NODELAY / disable Nagle's Algorithm.
                self.atg_stream.set_nodelay(True)
                while True:
                    # line = yield self.atg_stream.read_bytes(1024, partial=True)
                    # OR
                    line = yield self.atg_stream.read_until(b"\n")
                    logger.info("| RES-CTI |> %s" % line.decode().strip())
                    logger.info("| MSG-RECEIVED |> %s" % self.__clientID)
                    rconns = self.__rh.roomate_cwsconns(self.__clientID)
                    frame = self.make_frame(line.decode().strip())
                    for conn in rconns:
                        conn.write_frame(frame)
                    # yield gen.sleep(random.random() * 10)
                    # yield gen.sleep(0.01)
                    # n = line.decode().strip().split(";")[-1:]
                    # ext = line.decode().strip().split(";", 1)[0]
                    atg_codes = line.decode().strip().split(';')
                    # Only messages with at least 3 ';'-separated fields
                    # carry an event code in position 1.
                    if 2 in range(len(atg_codes)):
                        # atg_code = line.decode().strip().split(";", 2)[1]
                        atg_code = atg_codes[1]
                        dt = datetime.now().replace(microsecond=0)
                        if int(atg_code) == const.CM_CTI_USER_LOGIN:
                            logger.info("EVENTLOGGER>> Agent Login")
                            activity.dblog_AgentLogin(self.__agentID, 1,
                                                      self.__vdn_group, dt,
                                                      self.__ext, 0, 0, 0,
                                                      self.__ip_address)
                        elif int(atg_code) == const.CM_DEV_AG_READY:
                            logger.info("EVENTLOGGER>> Agent Ready")
                            activity.dblog_AgentActivity(
                                self.__agentID, 2, self.__vdn_group, dt,
                                self.__ext, self.__ip_address)
                        elif int(atg_code) == const.CM_DEV_AG_LOGOUT:
                            logger.info("EVENTLOGGER>> Agent Logout")
                            activity.dblog_LogAgentLogout(
                                self.__agentID, dt, self.__ip_address,
                                self.__vdn_group, 0, self.__ext)
                        elif int(atg_code) == const.EV_DEV_INITIATED:
                            logger.info("EVENTLOGGER>> Agent Call Intitial")
                            activity.dblog_LogAgentInit(self.__agentID, dt)
                        elif int(atg_code) == const.EV_DEV_CON_CLEAR:
                            logger.info("EVENTLOGGER>> Agent Call On Clear")
                            activity.dblog_LogAgentOnClear(self.__agentID, dt)
                        elif int(atg_code) == const.EV_DEV_DELIVERED:
                            logger.info("EVENTLOGGER>> Agent Call Offered")
                            activity.dblog_LogAgentOffered(self.__agentID, dt)
                        elif int(atg_code) == const.EV_DEV_ESTABLISHED:
                            logger.info("EVENTLOGGER>> Agent Call Connected")
                            activity.dblog_LogAgentConnected(
                                self.__agentID, dt)
                        elif int(atg_code) == const.EV_DEV_HELD:
                            logger.info("EVENTLOGGER>> Agent Call Hold")
                            activity.dblog_LogAgentHold(self.__agentID, dt)
                        elif int(atg_code) == const.EV_DEV_RETRIEVED:
                            logger.info("EVENTLOGGER>> Agent Call Retrive")
                            activity.dblog_LogAgentRetrive(self.__agentID, dt)
                    yield gen.sleep(0.5)
            except StreamClosedError as exc:
                logger.error("Error connecting to %d: %s", config.atgport, exc)
                yield gen.sleep(5)
def connect(self, host, port, callback=None): future = TCPClient.connect(self, host, port) future.add_done_callback(stack_context.wrap(partial(self.on_connect, callback=callback)))
def _connect(self): stream = yield TCPClient.connect(self, self.host, self.port) raise gen.Return(stream)
class Application(object):
    """SWIM-style membership application.

    Pings random peers, routes incoming messages to registered handlers,
    and disseminates node-state changes (ALIVE / SUSPECT / DEAD) via
    piggy-backed gossip on PING/ACK exchanges.
    """

    def __init__(self, routes, node, pipe):
        """
        Application instantiates and registers handlers for each message type,
        and routes messages to the pre-instantiated instances of each message
        handler

        :param routes: list of tuples in the form of
            (<message type str>, <MessageHandler class>)
        :param node: Node instance of the local node
        :param pipe: Instance of multiprocessing.Pipe for communicating with
            the parent process
        """
        # We don't really have to worry about synchronization
        # so long as we're careful about explicit context switching
        self.nodes = {node.node_id: node}
        self.local_node = node
        self.handlers = {}
        self.tcpclient = TCPClient()
        self.gossip_inbox = Queue()
        self.gossip_outbox = Queue()
        self.sequence_number = 0
        if routes:
            self.add_handlers(routes)
        self.pipe = pipe
        self.ioloop = IOLoop.current()
        # Set whenever we know about at least one other node; cleared when
        # the last peer is removed.
        self.add_node_event = Event()

    def next_sequence_number(self):
        """Return the next monotonically increasing sequence number,
        used to match an ACK to the PING that caused it."""
        self.sequence_number += 1
        return self.sequence_number

    @coroutine
    def ping_random_node(self):
        """Ping one random known peer; mark it SUSPECT on timeout."""
        node = yield self.get_random_node()
        LOGGER.debug('{} pinging random node: {}'.format(
            self.local_node.node_id, node.node_id))
        try:
            yield self.ping(node)
        except TimeoutError:
            self.mark_suspect(node)

    @coroutine
    def add_node(self, node):
        """Register a newly discovered node (no-op if already known)."""
        if node.node_id not in self.nodes:
            LOGGER.debug('Adding node {} to {}'.format(node, self.nodes))
            self.add_node_event.set()
            self.nodes[node.node_id] = node
            LOGGER.debug('Added node {} to {}'.format(node, self.nodes))

    @coroutine
    def remove_node(self, node):
        """Forget a node; clear add_node_event when no peers remain."""
        if node.node_id in self.nodes:
            del self.nodes[node.node_id]
            # BUG FIX: the coroutine must be *called* — yielding the bound
            # method object is always truthy, so add_node_event was never
            # cleared when the last peer disappeared.
            other_nodes = yield self.get_other_nodes()
            if not other_nodes:
                self.add_node_event.clear()

    def add_handlers(self, handlers):
        """Instantiate one handler per (message_type, handler_cls) route."""
        for message_type, handler_cls in handlers:
            assert message_type in MESSAGE_TYPES, (
                'Message type {!r} not found in MESSAGE TYPES {}'.format(
                    message_type,
                    MESSAGE_TYPES.keys()
                )
            )
            self.handlers[message_type] = handler_cls(self)

    def route_stream_message(self, stream, message_type, message):
        """Deserialize *message* into its class and dispatch it to the
        handler registered for *message_type*."""
        LOGGER.debug('{!r} received {} message from {!r}'.format(
            self, message_type, stream))
        message_cls = MESSAGE_TYPES[message_type]
        message_obj = message_cls(**message)
        handler = self.handlers[message_type]
        LOGGER.debug('Routing {} to {}'.format(message_type, handler))
        handler(stream, message_obj)

    @coroutine
    def send_message(self, stream, message):
        """Write *message* (msgpack-encoded) to *stream*; a closed stream
        is logged, not raised."""
        LOGGER.debug('Sending message {!r} to {}'.format(
            message.MESSAGE_TYPE, stream))
        try:
            yield stream.write(message.to_msgpack)
        except StreamClosedError:
            LOGGER.warn('Unable to send {} to {} - stream closed'.format(
                message.MESSAGE_TYPE, stream))

    @coroutine
    def _get_next_message(self, stream):
        """Read and unpack the next message dict from *stream* within
        PING_TIMEOUT; returns None implicitly if the stream closed."""
        unpacker = msgpack.Unpacker()
        try:
            wire_bytes = yield with_timeout(
                datetime.timedelta(seconds=PING_TIMEOUT),
                stream.read_bytes(4096, partial=True)
            )
        except StreamClosedError:
            LOGGER.warn('Unable to get next message from {} '
                        '- stream closed'.format(stream))
        else:
            unpacker.feed(wire_bytes)
            LOGGER.debug('Deserializing object from stream {}'.format(stream))
            message = unpacker.next()
            # The 'type' key was already consumed for routing purposes.
            message.pop('type')
            raise Return(message)

    @coroutine
    def ping(self, node):
        """
        Ping a node

        :param node: Instance of Node to ping
        :returns: Boolean, True if successful/False if fail
        """
        host = node.addr
        port = node.port
        LOGGER.debug('pinging {}:{}'.format(host, port))
        ping = Ping(seqno=self.next_sequence_number(),
                    node=node,
                    sender=self.local_node)
        # Connect to the node
        try:
            stream = yield self.tcpclient.connect(host, port)
        except StreamClosedError:
            LOGGER.error('Unable to connect from {} to {} '
                         '(pinging host)'.format(self.local_node.node_id,
                                                 node.node_id))
            raise Return(False)
        try:
            # Send the ping
            LOGGER.debug('Sending {!r} to {!r}'.format(ping.MESSAGE_TYPE, node))
            yield self.send_message(stream, ping)
            # Wait for an ACK message in response
            LOGGER.debug('Getting next message from {}:{}'.format(host, port))
            message = yield self._get_next_message(stream)
            if message is None:
                raise Return(False)
            ack = Ack(**message)
            LOGGER.debug('Received {!r} from {!r} (response to {!r})'.format(
                ack.MESSAGE_TYPE, node.node_id, ping.MESSAGE_TYPE))
            # Check that the ACK sequence number matches the PING sequence number
            if ack.seqno == ping.seqno:
                # BUG FIX: format string had one placeholder but two args;
                # restore the missing '{}' for the local node id.
                LOGGER.debug('Sequence number matches. Node {} looks good '
                             'to {}!'.format(node.node_id,
                                             self.local_node.node_id))
                # Process the gossip messages tacked onto the ACK message's
                # payload
                for message in ack.payload:
                    try:
                        self.gossip_inbox.put_nowait(message)
                    except QueueFull:
                        LOGGER.error('Unable to add {} message from {} to '
                                     'gossip inbox'.format(
                                         message.MESSAGE_TYPE, node.node_id))
                # mark the node as ALIVE in self.nodes
                self.mark_alive(node)
                # Send gossip that this node is alive
                self.queue_gossip_send(
                    Alive(node=node, sender=self.local_node)
                )
                raise Return(True)
            else:
                raise Return(False)
        finally:
            stream.close()

    @coroutine
    def ack(self, stream, seqno):
        """Reply to a PING with an ACK carrying up to ACK_PAYLOAD_SIZE
        queued gossip messages."""
        payload = []
        for _ in xrange(ACK_PAYLOAD_SIZE):
            try:
                gossip = self.gossip_outbox.get_nowait()
                payload.append(gossip)
            except QueueEmpty:
                break
        ack = Ack(seqno=seqno, payload=payload)
        LOGGER.debug('Trying to send ack: {}'.format(ack))
        try:
            yield stream.write(ack.to_msgpack)
        except StreamClosedError:
            LOGGER.error('Unable to connect from {} to stream '
                         '(acking PING)'.format(self.local_node.node_id))
        LOGGER.debug('Sent ack to {}'.format(stream))

    @coroutine
    def _change_node_state(self, node, state):
        """
        Because Tornado has explicit context switching, we don't need to
        worry much about synchronization here
        """
        LOGGER.debug('{} knows about {}: {}'.format(
            self.local_node.node_id, node.node_id, state))
        # NOTE(review): add_node is a coroutine invoked without yield; its
        # body has no yield points so it runs synchronously, but the
        # returned future is dropped — confirm this is intentional.
        self.add_node(node)
        self.nodes[node.node_id].state = state

    @coroutine
    def mark_alive(self, node):
        """Record *node* as ALIVE (never for the local node itself)."""
        if node.node_id != self.local_node.node_id:
            LOGGER.debug('Marking {} ALIVE'.format(node.node_id))
            self._change_node_state(node, State.ALIVE)

    @coroutine
    def mark_dead(self, node):
        self._change_node_state(node, State.DEAD)

    @coroutine
    def mark_suspect(self, node):
        self._change_node_state(node, State.SUSPECT)

    @coroutine
    def ingest_gossip_inbox(self):
        """Forever: pull gossip messages off the inbox, apply the state
        change they describe, and re-queue them for further spreading."""
        while True:
            LOGGER.debug('checking inbox')
            message = yield self.gossip_inbox.get()
            LOGGER.debug('Received message {} from gossip inbox'.format(
                message.MESSAGE_TYPE))
            if message.MESSAGE_TYPE == Alive.MESSAGE_TYPE:
                self.mark_alive(message.sender)
                self.mark_alive(message.node)
                self.queue_gossip_send(message)
            elif message.MESSAGE_TYPE == Suspect.MESSAGE_TYPE:
                self.mark_alive(message.sender)
                self.mark_suspect(message.node)
                self.queue_gossip_send(message)
            elif message.MESSAGE_TYPE == Dead.MESSAGE_TYPE:
                self.mark_alive(message.sender)
                self.mark_dead(message.node)
                self.queue_gossip_send(message)

    @coroutine
    def queue_gossip_send(self, message):
        """
        If the message is gossipable, add it to the outbox
        """
        try:
            next_incarnation = message.next_incarnation
            next_incarnation.sender = self.local_node
        except message.MaxIncarnationsReached:
            LOGGER.debug('Max incarnations reached for {}! '
                         'No gossip 4 u'.format(message.MESSAGE_TYPE))
        else:
            LOGGER.debug('Enqueuing {} gossips for {}'.format(
                GOSSIP_PEERS, message))
            for _ in xrange(GOSSIP_PEERS):
                yield self.gossip_outbox.put(next_incarnation)

    @coroutine
    def send_buffered_gossip(self):
        """Forever: pop one gossip message and push it to a random peer;
        on connect failure, requeue the message (best effort)."""
        while True:
            random_node = yield self.get_random_node()
            message = yield self.gossip_outbox.get()
            LOGGER.debug('{} connecting to {} for gossip'.format(
                self.local_node, random_node))
            try:
                stream = yield self.tcpclient.connect(random_node.addr,
                                                      random_node.port)
            except StreamClosedError:
                LOGGER.error('Unable to connect from {} to {} '
                             '(sending gossip)'.format(
                                 self.local_node.node_id,
                                 random_node.node_id))
                LOGGER.warning('Putting the gossip back on our queue')
                try:
                    self.gossip_outbox.put_nowait(message)
                except QueueFull:
                    LOGGER.error('Unable to put gossip back onto the queue. '
                                 'Giving up!')
            else:
                try:
                    LOGGER.debug('{} gossipping with {}'.format(
                        self.local_node.node_id, random_node.node_id))
                    yield self.send_message(stream, message)
                finally:
                    stream.close()

    @coroutine
    def get_other_nodes(self, exclude=None):
        """Return the node_ids of all known nodes except those in
        *exclude* (defaults to the local node)."""
        if exclude is None:
            exclude = (self.local_node,)
        exclude_node_ids = [n.node_id for n in exclude]
        # Iterating the dict yields node_ids (keys).
        raise Return([n for n in self.nodes if n not in exclude_node_ids])

    @coroutine
    def get_random_node(self, exclude=None):
        """Block until a peer exists, then return one non-excluded Node
        chosen uniformly at random."""
        LOGGER.debug('Waiting for more nodes')
        yield self.add_node_event.wait()
        LOGGER.debug('Getting non-self random node')
        other_nodes = yield self.get_other_nodes(exclude=exclude)
        LOGGER.debug('{} got something! choices: {}'.format(
            self.local_node.node_id, other_nodes))
        assert other_nodes
        node_id = random.choice(other_nodes)
        raise Return(self.nodes[node_id])
class TCPClientTest(AsyncTestCase):
    """Integration tests for TCPClient against a local TestTCPServer,
    covering IPv4/IPv6/dual-stack, source address/port binding, refused
    connections, and connect timeouts."""

    def setUp(self):
        super(TCPClientTest, self).setUp()
        self.server = None
        self.client = TCPClient()

    def start_server(self, family):
        # Travis dual-stack listeners are flaky; skip rather than fail.
        if family == socket.AF_UNSPEC and 'TRAVIS' in os.environ:
            self.skipTest("dual-stack servers often have port conflicts on travis")
        self.server = TestTCPServer(family)
        return self.server.port

    def stop_server(self):
        if self.server is not None:
            self.server.stop()
            self.server = None

    def tearDown(self):
        self.client.close()
        self.stop_server()
        super(TCPClientTest, self).tearDown()

    def skipIfLocalhostV4(self):
        """Skip the current test unless 'localhost' resolves to an IPv6
        address on this machine."""
        # The port used here doesn't matter, but some systems require it
        # to be non-zero if we do not also pass AI_PASSIVE.
        addrinfo = self.io_loop.run_sync(lambda: Resolver().resolve('localhost', 80))
        families = set(addr[0] for addr in addrinfo)
        if socket.AF_INET6 not in families:
            self.skipTest("localhost does not resolve to ipv6")

    @gen_test
    def do_test_connect(self, family, host, source_ip=None, source_port=None):
        """Shared body: connect to our echo of a server and round-trip
        b'hello' over the accepted stream."""
        port = self.start_server(family)
        stream = yield self.client.connect(host, port,
                                           source_ip=source_ip,
                                           source_port=source_port)
        server_stream = yield self.server.queue.get()
        with closing(stream):
            stream.write(b"hello")
            data = yield server_stream.read_bytes(5)
            self.assertEqual(data, b"hello")

    def test_connect_ipv4_ipv4(self):
        self.do_test_connect(socket.AF_INET, '127.0.0.1')

    def test_connect_ipv4_dual(self):
        self.do_test_connect(socket.AF_INET, 'localhost')

    @skipIfNoIPv6
    def test_connect_ipv6_ipv6(self):
        self.skipIfLocalhostV4()
        self.do_test_connect(socket.AF_INET6, '::1')

    @skipIfNoIPv6
    def test_connect_ipv6_dual(self):
        self.skipIfLocalhostV4()
        if Resolver.configured_class().__name__.endswith('TwistedResolver'):
            self.skipTest('TwistedResolver does not support multiple addresses')
        self.do_test_connect(socket.AF_INET6, 'localhost')

    def test_connect_unspec_ipv4(self):
        self.do_test_connect(socket.AF_UNSPEC, '127.0.0.1')

    @skipIfNoIPv6
    def test_connect_unspec_ipv6(self):
        self.skipIfLocalhostV4()
        self.do_test_connect(socket.AF_UNSPEC, '::1')

    def test_connect_unspec_dual(self):
        self.do_test_connect(socket.AF_UNSPEC, 'localhost')

    @gen_test
    def test_refused_ipv4(self):
        # refusing_port yields a port guaranteed to refuse connections.
        cleanup_func, port = refusing_port()
        self.addCleanup(cleanup_func)
        with self.assertRaises(IOError):
            yield self.client.connect('127.0.0.1', port)

    def test_source_ip_fail(self):
        '''
        Fail when trying to use the source IP Address '8.8.8.8'.
        '''
        self.assertRaises(socket.error,
                          self.do_test_connect,
                          socket.AF_INET,
                          '127.0.0.1',
                          source_ip='8.8.8.8')

    def test_source_ip_success(self):
        '''
        Success when trying to use the source IP Address '127.0.0.1'
        '''
        self.do_test_connect(socket.AF_INET, '127.0.0.1', source_ip='127.0.0.1')

    @skipIfNonUnix
    def test_source_port_fail(self):
        '''
        Fail when trying to use source port 1.
        '''
        # Binding to a privileged port requires root on Unix.
        self.assertRaises(socket.error,
                          self.do_test_connect,
                          socket.AF_INET,
                          '127.0.0.1',
                          source_port=1)

    @gen_test
    def test_connect_timeout(self):
        timeout = 0.05

        class TimeoutResolver(Resolver):
            def resolve(self, *args, **kwargs):
                return Future()  # never completes

        with self.assertRaises(TimeoutError):
            yield TCPClient(resolver=TimeoutResolver()).connect(
                '1.2.3.4', 12345, timeout=timeout)
class EditorClient(object):
    """
    Client for connect Referee and server worker (send and request info).

    Protocol description:
    https://checkio.atlassian.net/wiki/pages/viewpage.action?pageId=18219162
    """
    # Line-delimited protocol terminator.
    TERMINATOR = b"\n"
    ATTR_NAME_CONNECTION_ID = "user_connection_id"
    ATTR_NAME_DOCKER_ID = "docker_id"

    def __init__(self, host, port, user_connection_id, docker_id, io_loop):
        self.__host = host
        self.__port = port
        self.__user_connection_id = user_connection_id
        self.__docker_id = docker_id
        self._io_loop = io_loop
        self.client = TCPClient(io_loop=self._io_loop)
        self._stream = None
        # Maps request_id -> Future resolved when the matching response
        # packet arrives.
        self._requests = dict()
        # One Signal per inbound packet method; callbacks subscribe here.
        self._requests_signals = {
            packet.InPacket.METHOD_SELECT_RESULT: Signal("data"),
            packet.InPacket.METHOD_GET_STATUS: Signal("data"),
            packet.InPacket.METHOD_CANCEL: Signal("data"),
        }

    @gen.coroutine
    def connect(self):
        """Connect to the server and start the read loop; re-raises any
        IOError after logging it."""
        try:
            yield self._connect(self.__host, self.__port)
        except IOError as e:
            logger.error(e, exc_info=True)
            raise
        self._read()
        return True

    @gen.coroutine
    def _connect(self, host, port):
        self._stream = yield self.client.connect(host=host, port=port)
        # NOTE(review): _confirm_connection is a coroutine called without
        # yield — its future is dropped; confirm fire-and-forget is intended.
        self._confirm_connection()

    def set_close_callback(self, callback):
        self._stream.set_close_callback(callback)

    @gen.coroutine
    def _write(self, method, data=None, request_id=None):
        """Encode an OutPacket and write it, newline-terminated, to the
        stream; raises EditorPacketStructureError if already closed."""
        if self._stream.closed():
            raise EditorPacketStructureError("Connection is closed")
        message = packet.OutPacket(method, data, request_id).encode()
        try:
            yield self._stream.write(message + self.TERMINATOR)
        except Exception as e:
            logger.error(e, exc_info=True)
        else:
            logger.debug("EditorClient:: send: {}".format(message))

    def _read(self):
        # Schedule the next line read; _on_data re-arms this, forming the
        # read loop.
        self._stream.read_until(self.TERMINATOR, self._on_data)

    def _on_data(self, data):
        """Decode one inbound packet, resolve its pending request future
        (if any), fire the method's signal, then re-arm the read loop."""
        logger.debug("UserClient:: received: {}".format(data))
        if data is None:
            logger.error("UserClient:: received")
        else:
            try:
                pkt = packet.InPacket.decode(data)
            except EditorPacketStructureError as e:
                logger.error(e, exc_info=True)
            else:
                if pkt.request_id is not None:
                    f = self._requests[pkt.request_id]
                    f.set_result(result=pkt.data)
                    del self._requests[pkt.request_id]
                signal = self._requests_signals[pkt.method]
                signal.send(data=pkt.data)
        self._read()

    def add_cancel_callback(self, callback):
        self.add_data_callback(packet.InPacket.METHOD_CANCEL, callback)

    def add_data_callback(self, request_method, callback):
        """Subscribe *callback* to packets of *request_method*; raises for
        methods without a registered signal."""
        if request_method not in self._requests_signals.keys():
            raise Exception("Undefined request method {}".format(request_method))
        signal = self._requests_signals[request_method]
        signal.connect(callback)

    def send_select_data(self, data):
        """Send a SELECT request; returns a Future resolved with the
        response data when it arrives."""
        request_id = uuid.uuid4().hex
        self._write(packet.OutPacket.METHOD_SELECT, data, request_id)
        self._requests[request_id] = gen.Future()
        return self._requests[request_id]

    @gen.coroutine
    def send_stderr(self, line):
        yield self._write(packet.OutPacket.METHOD_STDERR, line)

    @gen.coroutine
    def send_stdout(self, line):
        yield self._write(packet.OutPacket.METHOD_STDOUT, line)

    @gen.coroutine
    def send_check_result(self, success, code, points=None, additional_data=None):
        yield self.send_result(
            action=packet.RESULT_ACTION_CHECK,
            success=success,
            code=code,
            points=points,
            additional_data=additional_data,
        )

    @gen.coroutine
    def send_try_it_result(self, success, code, points=None, additional_data=None):
        yield self.send_result(
            action=packet.RESULT_ACTION_TRY_IT,
            success=success,
            code=code,
            points=points,
            additional_data=additional_data,
        )

    @gen.coroutine
    def send_run_finish(self, code):
        yield self.send_result(action=packet.RESULT_ACTION_RUN,
                               success=True, code=code)

    @gen.coroutine
    def send_pre_test(self, data):
        yield self._write(packet.OutPacket.METHOD_PRE_TEST, data)

    @gen.coroutine
    def send_post_test(self, data):
        yield self._write(packet.OutPacket.METHOD_POST_TEST, data)

    @gen.coroutine
    def send_result(self, action, success, code, points=None, additional_data=None):
        """Send a RESULT packet; *action* must be one of the three known
        result actions or EditorPacketStructureError is raised."""
        if action not in (packet.RESULT_ACTION_CHECK,
                          packet.RESULT_ACTION_TRY_IT,
                          packet.RESULT_ACTION_RUN):
            raise EditorPacketStructureError(
                "REFEREE:: Sent to editor action is incorrect: {}".format(action))
        data = {"action": action, "success": bool(success), "code": code}
        if points is not None:
            data["points"] = points
        if additional_data is not None:
            data["additional_data"] = additional_data
        yield self._write(packet.OutPacket.METHOD_RESULT, data)

    @gen.coroutine
    def send_error(self, message, traceback=None):
        data = {"message": message}
        if traceback is not None:
            data["traceback"] = traceback
        yield self._write(packet.OutPacket.METHOD_ERROR, data)

    @gen.coroutine
    def send_status(self, status_data):
        yield self._write(packet.OutPacket.METHOD_STATUS, status_data)

    @gen.coroutine
    def send_custom(self, data):
        yield self._write(packet.OutPacket.METHOD_CUSTOM, data)

    @gen.coroutine
    def send_battle(self, data):
        yield self._write(packet.OutPacket.METHOD_BATTLE, data)

    @gen.coroutine
    def _confirm_connection(self):
        """
        Only after client send connection id, server will start send data.
        Until that, server will skip all requests.
        """
        yield self._write(
            packet.OutPacket.METHOD_SET,
            {self.ATTR_NAME_CONNECTION_ID: self.__user_connection_id,
             self.ATTR_NAME_DOCKER_ID: self.__docker_id},
        )
class TCPClientTest(AsyncTestCase):
    """Older variant of the TCPClient integration tests (callback-style
    resolver wait, no source_ip/source_port coverage)."""

    def setUp(self):
        super(TCPClientTest, self).setUp()
        self.server = None
        self.client = TCPClient()

    def start_server(self, family):
        # Travis dual-stack listeners are flaky; skip rather than fail.
        if family == socket.AF_UNSPEC and 'TRAVIS' in os.environ:
            self.skipTest(
                "dual-stack servers often have port conflicts on travis")
        self.server = TestTCPServer(family)
        return self.server.port

    def stop_server(self):
        if self.server is not None:
            self.server.stop()
            self.server = None

    def tearDown(self):
        self.client.close()
        self.stop_server()
        super(TCPClientTest, self).tearDown()

    def skipIfLocalhostV4(self):
        """Skip the current test unless 'localhost' resolves to an IPv6
        address on this machine."""
        Resolver().resolve('localhost', 0, callback=self.stop)
        addrinfo = self.wait()
        families = set(addr[0] for addr in addrinfo)
        if socket.AF_INET6 not in families:
            self.skipTest("localhost does not resolve to ipv6")

    @gen_test
    def do_test_connect(self, family, host):
        """Shared body: connect and round-trip b'hello' over the first
        accepted server stream."""
        port = self.start_server(family)
        stream = yield self.client.connect(host, port)
        with closing(stream):
            stream.write(b"hello")
            data = yield self.server.streams[0].read_bytes(5)
            self.assertEqual(data, b"hello")

    def test_connect_ipv4_ipv4(self):
        self.do_test_connect(socket.AF_INET, '127.0.0.1')

    def test_connect_ipv4_dual(self):
        self.do_test_connect(socket.AF_INET, 'localhost')

    @skipIfNoIPv6
    def test_connect_ipv6_ipv6(self):
        self.skipIfLocalhostV4()
        self.do_test_connect(socket.AF_INET6, '::1')

    @skipIfNoIPv6
    def test_connect_ipv6_dual(self):
        self.skipIfLocalhostV4()
        if Resolver.configured_class().__name__.endswith('TwistedResolver'):
            self.skipTest(
                'TwistedResolver does not support multiple addresses')
        self.do_test_connect(socket.AF_INET6, 'localhost')

    def test_connect_unspec_ipv4(self):
        self.do_test_connect(socket.AF_UNSPEC, '127.0.0.1')

    @skipIfNoIPv6
    def test_connect_unspec_ipv6(self):
        self.skipIfLocalhostV4()
        self.do_test_connect(socket.AF_UNSPEC, '::1')

    def test_connect_unspec_dual(self):
        self.do_test_connect(socket.AF_UNSPEC, 'localhost')

    @gen_test
    def test_refused_ipv4(self):
        # Grab an unused port, close the socket, then expect the connect
        # to that now-unbound port to be refused.
        sock, port = bind_unused_port()
        sock.close()
        with self.assertRaises(IOError):
            yield self.client.connect('127.0.0.1', port)