Example #1
    def __init__(self,
                 host,
                 port,
                 io_loop,
                 ssl_options,
                 max_connect_backoff=MAX_CONNECT_BACKOFF,
                 initial_window_size=DEFAULT_WINDOW_SIZE,
                 connect_timeout=5,
                 connect_callback=None):
        self.host = host
        self.port = port
        self.io_loop = io_loop

        self.tcp_client = TCPClient()

        self.h2conn = None
        self.io_stream = None
        self.window_manager = None
        self.connect_timeout = connect_timeout
        self._connect_timeout_handle = None
        self._connect_future = None

        self._streams = {}
        self.ssl_context = None
        self.ssl_options = ssl_options

        self.parse_ssl_opts()

        self.initial_window_size = initial_window_size
        self.max_connect_backoff = max_connect_backoff
        self.consecutive_connect_fails = 0
        self.connect_callback = connect_callback

        self._closed = False
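The `ssl_options` passed in above is later normalized by `parse_ssl_opts()` (not shown in this excerpt). With Tornado's `TCPClient`, such options are conventionally either an `ssl.SSLContext` or a dict of `ssl.wrap_socket`-style keyword arguments; a minimal sketch of the dict form, with purely illustrative paths, might look like:

import ssl

ssl_options = {
    'ca_certs': '/etc/ssl/certs/ca-bundle.crt',  # illustrative CA bundle path
    'cert_reqs': ssl.CERT_REQUIRED,              # verify the server certificate
    # 'certfile': '/etc/ssl/client.crt',         # client certificate, if the server requires one
    # 'keyfile': '/etc/ssl/client.key',
}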
Example #2
def _run_traffic_jam(nsends, nbytes):
    # This test eats `nsends * nbytes` bytes in RAM
    np = pytest.importorskip('numpy')
    from distributed.protocol import to_serialize
    data = bytes(np.random.randint(0, 255, size=(nbytes,)).astype('u1').data)
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=0.01)
        b.start(stream)

        msg = {'x': to_serialize(data)}
        for i in range(nsends):
            b.send(assoc(msg, 'i', i))
            if np.random.random() > 0.5:
                yield gen.sleep(0.001)

        results = []
        count = 0
        while len(results) < nsends:
            # If this times out then I think it's a backpressure issue
            # Somehow we're able to flood the socket so that the receiving end
            # loses some of our messages
            L = yield gen.with_timeout(timedelta(seconds=5), read(stream))
            count += 1
            results.extend(r['i'] for r in L)

        assert count == b.batch_count == e.count
        assert b.message_count == nsends

        assert results == list(range(nsends))

        stream.close()  # external closing
        yield b.close(ignore_closed=True)
Example #3
class TCPClientTest(AsyncTestCase):
    def setUp(self):
        super(TCPClientTest, self).setUp()
        self.server = None
        self.client = TCPClient()

    def start_server(self, family):
        if family == socket.AF_UNSPEC and "TRAVIS" in os.environ:
            self.skipTest("dual-stack servers often have port conflicts on travis")
        self.server = TestTCPServer(family)
        return self.server.port

    def stop_server(self):
        if self.server is not None:
            self.server.stop()
            self.server = None

    def tearDown(self):
        self.client.close()
        self.stop_server()
        super(TCPClientTest, self).tearDown()

    def skipIfLocalhostV4(self):
        # The port used here doesn't matter, but some systems require it
        # to be non-zero if we do not also pass AI_PASSIVE.
        addrinfo = self.io_loop.run_sync(lambda: Resolver().resolve("localhost", 80))
        families = set(addr[0] for addr in addrinfo)
        if socket.AF_INET6 not in families:
            self.skipTest("localhost does not resolve to ipv6")
Example #4
    def __init__(self, routes, node, pipe):
        """
        Application instantiates and registers handlers for each message type,
        and routes messages to the pre-instantiated instances of each message handler

        :param routes: list of tuples in the form of (<message type str>, <MessageHandler class>)
        :param node: Node instance of the local node
        :param pipe: Instance of multiprocessing.Pipe for communicating with the parent process
        """
        # We don't really have to worry about synchronization
        # so long as we're careful about explicit context switching
        self.nodes = {node.node_id: node}

        self.local_node = node
        self.handlers = {}

        self.tcpclient = TCPClient()

        self.gossip_inbox = Queue()
        self.gossip_outbox = Queue()

        self.sequence_number = 0

        if routes:
            self.add_handlers(routes)

        self.pipe = pipe
        self.ioloop = IOLoop.current()

        self.add_node_event = Event()
Example #5
    def __init__(self, io_loop, request, on_message_callback=None,
                 compression_options=None):
        self.compression_options = compression_options
        self.connect_future = TracebackFuture()
        self.protocol = None
        self.read_future = None
        self.read_queue = collections.deque()
        self.key = base64.b64encode(os.urandom(16))
        self._on_message_callback = on_message_callback

        scheme, sep, rest = request.url.partition(':')
        scheme = {'ws': 'http', 'wss': 'https'}[scheme]
        request.url = scheme + sep + rest
        request.headers.update({
            'Upgrade': 'websocket',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key': self.key,
            'Sec-WebSocket-Version': '13',
        })
        if self.compression_options is not None:
            # Always offer to let the server set our max_wbits (and even though
            # we don't offer it, we will accept a client_no_context_takeover
            # from the server).
            # TODO: set server parameters for deflate extension
            # if requested in self.compression_options.
            request.headers['Sec-WebSocket-Extensions'] = (
                'permessage-deflate; client_max_window_bits')

        self.tcp_client = TCPClient(io_loop=io_loop)
        super(WebSocketClientConnection, self).__init__(
            io_loop, None, request, lambda: None, self._on_http_response,
            104857600, self.tcp_client, 65536)
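In application code this connection class is normally not constructed directly; Tornado exposes it through `tornado.websocket.websocket_connect`. A minimal sketch of that call (the URL and message are illustrative):

from tornado import gen
from tornado.websocket import websocket_connect

@gen.coroutine
def talk():
    # websocket_connect returns a future resolving to a WebSocketClientConnection
    conn = yield websocket_connect('ws://localhost:8888/ws')
    conn.write_message('hello')
    reply = yield conn.read_message()   # None once the connection is closed
    raise gen.Return(reply)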
Example #6
 def connect(self):
     client = TCPClient()
     self.stream = yield client.connect(self.host, self.port)
     # sock = None
     # try:
     #     if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
     #         sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
     #         t = sock.gettimeout()
     #         sock.settimeout(self.connect_timeout)
     #         sock.connect(self.unix_socket)
     #         sock.settimeout(t)
     #         self.host_info = "Localhost via UNIX socket"
     #         if DEBUG: print('connected using unix_socket')
     #     else:
     #         while True:
     #             try:
     #                 sock = socket.create_connection(
     #                         (self.host, self.port), self.connect_timeout)
     #                 break
     #             except (OSError, IOError) as e:
     #                 if e.errno == errno.EINTR:
     #                     continue
     #                 raise
     #         self.host_info = "socket %s:%d" % (self.host, self.port)
     #         if DEBUG: print('connected using socket')
     #     sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
     #     if self.no_delay:
     #         sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
     #     self.socket = sock
     #     self._rfile = _makefile(sock, 'rb')
     yield self._get_server_information()
     yield self._request_authentication()
Example #7
    def __init__(self):
        debug('Starting Envisalink Client')

        # Register events for alarmserver requests -> envisalink
        events.register('alarm_update', self.request_action)

        # Register events for envisalink proxy
        events.register('envisalink', self.envisalink_proxy)

        # Create TCP Client
        self.tcpclient = TCPClient()

        # Connection
        self._connection = None

        # Set our terminator to \r\n
        self._terminator = b"\r\n"

        # Reconnect delay
        self._retrydelay = 10

        # Connect to Envisalink
        self.do_connect()

        # Setup timer to refresh envisalink
        tornado.ioloop.PeriodicCallback(self.check_connection, 1000).start()

        # Last activity
        self._last_activity = time.time()
Example #8
def test_BatchedSend():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        assert str(len(b.buffer)) in str(b)
        assert str(len(b.buffer)) in repr(b)
        b.start(stream)

        yield gen.sleep(0.020)

        b.send('hello')
        b.send('hello')
        b.send('world')
        yield gen.sleep(0.020)
        b.send('HELLO')
        b.send('HELLO')

        result = yield read(stream)
        assert result == ['hello', 'hello', 'world']
        result = yield read(stream)
        assert result == ['HELLO', 'HELLO']

        assert b.byte_count > 1
Example #9
def whois_async(query, fields=None):
    """
    Perform whois request
    :param query:
    :param fields:
    :return:
    """
    logger.debug("whois %s", query)
    # Get appropriate whois server
    if is_fqdn(query):
        # Use TLD.whois-servers.net for domain lookup
        tld = query.split(".")[-1]
        server = "%s.whois-servers.net" % tld
    else:
        server = DEFAULT_WHOIS_SERVER
    # Perform query
    try:
        client = TCPClient()
        stream = yield client.connect(server, DEFAULT_WHOIS_PORT)
    except IOError as e:
        logger.error("Cannot resolve host '%s': %s", server, e)
        raise tornado.gen.Return()
    try:
        yield stream.write(str(query) + "\r\n")
        data = yield stream.read_until_close()
    finally:
        yield stream.close()
    data = parse_response(data)
    if fields:
        data = [(k, v) for k, v in data if k in fields]
    raise tornado.gen.Return(data)
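Assuming `whois_async` is a Tornado coroutine as written above, a blocking caller could drive it with `IOLoop.run_sync`; a small sketch (the domain and field names are illustrative):

from tornado.ioloop import IOLoop

# Returns the parsed (key, value) pairs produced by parse_response(),
# filtered down to the requested fields.
pairs = IOLoop.current().run_sync(
    lambda: whois_async("example.com", fields=["domain", "registrar"]))
for key, value in pairs:
    print("%s: %s" % (key, value))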
Example #10
def _run_traffic_jam(nsends, nbytes):
    # This test eats `nsends * nbytes` bytes in RAM
    np = pytest.importorskip('numpy')
    from distributed.protocol import to_serialize
    data = bytes(np.random.randint(0, 255, size=(nbytes, )).astype('u1').data)
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=0.01)
        b.start(stream)

        msg = {'x': to_serialize(data)}
        for i in range(nsends):
            b.send(assoc(msg, 'i', i))
            if np.random.random() > 0.5:
                yield gen.sleep(0.001)

        results = []
        count = 0
        while len(results) < nsends:
            # If this times out then I think it's a backpressure issue
            # Somehow we're able to flood the socket so that the receiving end
            # loses some of our messages
            L = yield gen.with_timeout(timedelta(seconds=5), read(stream))
            count += 1
            results.extend(r['i'] for r in L)

        assert count == b.batch_count == e.count
        assert b.message_count == nsends

        assert results == list(range(nsends))

        stream.close()  # external closing
        yield b.close(ignore_closed=True)
Example #11
    def __init__(self, *args, **kwargs):

        TCPClient.__init__(self, kwargs.pop("resolver", None))

        Connection.__init__(self, parser_class=AsyncParser, *args, **kwargs)

        self._stream = None
Example #12
    def connect(self, address, deserialize=True, **connection_args):
        self._check_encryption(address, connection_args)
        ip, port = parse_host_port(address)
        kwargs = self._get_connect_args(**connection_args)

        client = TCPClient()
        try:
            stream = yield client.connect(ip, port,
                                          max_buffer_size=MAX_BUFFER_SIZE,
                                          **kwargs)
            # Under certain circumstances tornado will have a closed connnection with an error and not raise
            # a StreamClosedError.
            #
            # This occurs with tornado 5.x and openssl 1.1+
            if stream.closed() and stream.error:
                raise StreamClosedError(stream.error)

        except StreamClosedError as e:
            # The socket connect() call failed
            convert_stream_closed_error(self, e)

        local_address = self.prefix + get_stream_address(stream)
        raise gen.Return(self.comm_class(stream,
                                         local_address,
                                         self.prefix + address,
                                         deserialize))
Example #13
def test_stress():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)
        L = []

        @gen.coroutine
        def send():
            b = BatchedSend(interval=3)
            b.start(stream)
            for i in range(0, 10000, 2):
                b.send(i)
                b.send(i + 1)
                yield gen.sleep(0.00001 * random.randint(1, 10))

        @gen.coroutine
        def recv():
            while True:
                result = yield gen.with_timeout(timedelta(seconds=1),
                                                read(stream))
                print(result)
                L.extend(result)
                if result[-1] == 9999:
                    break

        yield All([send(), recv()])

        assert L == list(range(0, 10000, 1))
        stream.close()
Example #14
 def __init__(self, endpoint=DEFAULT_ENDPOINT_DEALER, timeout=5.0):
     self.endpoint = endpoint
     self._id = cast_bytes(uuid.uuid4().hex)
     self.timeout = timeout
     self.stream = None
     self.endpoint = endpoint
     self.client = TCPClient()
Example #15
 def connect(self):
     client = TCPClient(io_loop=self.io_loop)
     self.stream = yield client.connect(self.host, self.port)
     # sock = None
     # try:
     #     if self.unix_socket and self.host in ('localhost', '127.0.0.1'):
     #         sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
     #         t = sock.gettimeout()
     #         sock.settimeout(self.connect_timeout)
     #         sock.connect(self.unix_socket)
     #         sock.settimeout(t)
     #         self.host_info = "Localhost via UNIX socket"
     #         if DEBUG: print('connected using unix_socket')
     #     else:
     #         while True:
     #             try:
     #                 sock = socket.create_connection(
     #                         (self.host, self.port), self.connect_timeout)
     #                 break
     #             except (OSError, IOError) as e:
     #                 if e.errno == errno.EINTR:
     #                     continue
     #                 raise
     #         self.host_info = "socket %s:%d" % (self.host, self.port)
     #         if DEBUG: print('connected using socket')
     #     sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
     #     if self.no_delay:
     #         sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
     #     self.socket = sock
     #     self._rfile = _makefile(sock, 'rb')
     yield self._get_server_information()
     yield self._request_authentication()
Example #16
    def initialize(self,
                   max_clients=10,
                   hostname_mapping=None,
                   max_buffer_size=104857600,
                   resolver=None,
                   defaults=None,
                   max_header_size=None,
                   max_body_size=None):
        """Creates a AsyncHTTPClient.

        Only a single AsyncHTTPClient instance exists per IOLoop
        in order to provide limitations on the number of pending connections.
        ``force_instance=True`` may be used to suppress this behavior.

        Note that because of this implicit reuse, unless ``force_instance``
        is used, only the first call to the constructor actually uses
        its arguments. It is recommended to use the ``configure`` method
        instead of the constructor to ensure that arguments take effect.

        ``max_clients`` is the number of concurrent requests that can be
        in progress; when this limit is reached additional requests will be
        queued. Note that time spent waiting in this queue still counts
        against the ``request_timeout``.

        ``hostname_mapping`` is a dictionary mapping hostnames to IP addresses.
        It can be used to make local DNS changes when modifying system-wide
        settings like ``/etc/hosts`` is not possible or desirable (e.g. in
        unittests).

        ``max_buffer_size`` (default 100MB) is the number of bytes
        that can be read into memory at once. ``max_body_size``
        (defaults to ``max_buffer_size``) is the largest response body
        that the client will accept.  Without a
        ``streaming_callback``, the smaller of these two limits
        applies; with a ``streaming_callback`` only ``max_body_size``
        does.

        .. versionchanged:: 4.2
           Added the ``max_body_size`` argument.
        """
        super(SimpleAsyncHTTPClient, self).initialize(defaults=defaults)
        self.max_clients = max_clients
        self.queue = collections.deque()
        self.active = {}
        self.waiting = {}
        self.max_buffer_size = max_buffer_size
        self.max_header_size = max_header_size
        self.max_body_size = max_body_size
        # TCPClient could create a Resolver for us, but we have to do it
        # ourselves to support hostname_mapping.
        if resolver:
            self.resolver = resolver
            self.own_resolver = False
        else:
            self.resolver = Resolver()
            self.own_resolver = True
        if hostname_mapping is not None:
            self.resolver = OverrideResolver(resolver=self.resolver,
                                             mapping=hostname_mapping)
        self.tcp_client = TCPClient(resolver=self.resolver)
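The docstring above recommends `AsyncHTTPClient.configure` over constructor arguments. A short sketch of doing that (the mapping and limits are illustrative):

from tornado.httpclient import AsyncHTTPClient

# Configure the shared per-IOLoop client before the first instantiation;
# later AsyncHTTPClient() calls return the instance built with these arguments.
AsyncHTTPClient.configure(
    "tornado.simple_httpclient.SimpleAsyncHTTPClient",
    max_clients=20,
    hostname_mapping={"service.internal": "127.0.0.1"},
)
client = AsyncHTTPClient()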
Example #17
def test_stress():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)
        L = []

        @gen.coroutine
        def send():
            b = BatchedSend(interval=3)
            b.start(stream)
            for i in range(0, 10000, 2):
                b.send(i)
                b.send(i + 1)
                yield gen.sleep(0.00001 * random.randint(1, 10))

        @gen.coroutine
        def recv():
            while True:
                result = yield gen.with_timeout(timedelta(seconds=1), read(stream))
                print(result)
                L.extend(result)
                if result[-1] == 9999:
                    break

        yield All([send(), recv()])

        assert L == list(range(0, 10000, 1))
        stream.close()
Example #18
    def __init__(self, *args, **kwargs):

        TCPClient.__init__(self, kwargs.pop("resolver", None),
                           kwargs.pop("io_loop", None))

        Connection.__init__(self, parser_class=AsyncParser, *args, **kwargs)

        self._stream = None
Example #19
def test_close_twice():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        b.start(stream)
        yield b.close()
        yield b.close()
Example #20
    def connect(self, timeout):
        deadline = None if timeout is None else self._io_loop.time() + timeout

        try:
            if len(self._parent.ssl) > 0:
                ssl_options = {}
                if self._parent.ssl["ca_certs"]:
                    ssl_options['ca_certs'] = self._parent.ssl["ca_certs"]
                    ssl_options['cert_reqs'] = 2  # ssl.CERT_REQUIRED
                stream_future = TCPClient().connect(self._parent.host,
                                                    self._parent.port,
                                                    ssl_options=ssl_options)
            else:
                stream_future = TCPClient().connect(self._parent.host,
                                                    self._parent.port)

            self._stream = yield with_absolute_timeout(
                deadline,
                stream_future,
                io_loop=self._io_loop,
                quiet_exceptions=(iostream.StreamClosedError))
        except Exception as err:
            raise ReqlDriverError(
                'Could not connect to %s:%s. Error: %s' %
                (self._parent.host, self._parent.port, str(err)))

        self._stream.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,
                                       1)
        self._stream.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE,
                                       1)

        try:
            self._stream.write(self._parent.handshake)
            response = yield with_absolute_timeout(
                deadline,
                self._stream.read_until(b'\0'),
                io_loop=self._io_loop,
                quiet_exceptions=(iostream.StreamClosedError))
        except Exception as err:
            raise ReqlDriverError(
                'Connection interrupted during handshake with %s:%s. Error: %s'
                % (self._parent.host, self._parent.port, str(err)))

        message = decodeUTF(response[:-1]).split('\n')[0]

        if message != 'SUCCESS':
            yield self.close(False, None)
            if message == "ERROR: Incorrect authorization key":
                raise ReqlAuthError(self._parent.host, self._parent.port)
            else:
                raise ReqlDriverError(
                    'Server dropped connection with message: "%s"' %
                    (message, ))

        # Start a parallel function to perform reads
        self._io_loop.add_callback(self._reader)
        raise gen.Return(self._parent)
Example #22
    def start(self):
        self.client = TCPClient()

        self.pcb = PeriodicCallback(self.send, 1000.0 / self.n)
        self.pcb.start()

        IOLoop.current().call_later(self.duration + 0.5, self.stop)
        IOLoop.current().start()
        IOLoop.clear_current()
Example #23
class CircleClient(object):
    def __init__(self, endpoint=DEFAULT_ENDPOINT_DEALER, timeout=5.0):
        self.endpoint = endpoint
        self._id = cast_bytes(uuid.uuid4().hex)
        self.timeout = timeout
        self.stream = None
        self.endpoint = endpoint
        self.client = TCPClient()

    def stop(self):
        self.client.close()

    def send_message(self, command, **props):
        return self.call(make_message(command, **props))

    def call(self, cmd):
        result = IOLoop.instance().run_sync(lambda: self._call(cmd))
        return result

    @gen.coroutine
    def _call(self, cmd):
        if isinstance(cmd, basestring):
            raise DeprecationWarning('call() takes a mapping')

        call_id = uuid.uuid4().hex
        cmd['id'] = call_id
        host, port = self.endpoint.split(':')

        try:
            cmd = json.dumps(cmd)
            self.stream = yield gen_timeout(self.timeout,
                                            self.client.connect(host, port))

            yield gen_timeout(self.timeout, self.stream.write(cmd + MSG_END))
        except StreamClosedError:
            raise CallError("Can't connect circled. Maybe it is closed.")
        except gen.TimeoutError:
            raise CallError('Connect timed out ({} seconds).'.format(
                self.timeout))
        except ValueError as e:
            raise CallError(str(e))

        while True:
            try:
                msg = yield gen_timeout(self.timeout,
                                        self.stream.read_until(MSG_END))
                msg = rstrip(msg, MSG_END)
                res = json.loads(msg)
                if 'id' in res and res['id'] not in (call_id, None):
                    # we got the wrong message
                    continue
                raise gen.Return(res)
            except gen.TimeoutError:
                raise CallError('Run timed out ({} seconds).'.format(
                    self.timeout))
            except ValueError as e:
                raise CallError(str(e))
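As a rough usage sketch of the client above: `send_message` builds a command mapping, `call` runs the request synchronously via `run_sync`, and the dict decoded from the JSON reply comes back (the endpoint and command name are assumptions, not taken from this excerpt):

client = CircleClient(endpoint='127.0.0.1:5555')
try:
    # make_message(command, **props) produces the dict that _call() sends;
    # the coroutine matches the reply on the generated 'id' field.
    response = client.send_message('status')   # command name is illustrative
finally:
    client.stop()
print(response)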
Example #24
 def __init__(self, ssl_options=None, gp_module=False, **kwargs):
     self.logger = logging.getLogger(self.__class__.__name__)
     self.gp_module = gp_module
     try:
         TCPClient.__init__(self, ssl_options=ssl_options, **kwargs)
     except:
         etype, evalue, etb = sys.exc_info()
         self.logger.error("Could not create tcp client. Exception: %s, Error: %s." % (etype, evalue))
         self.gp_module.shutDown()
Example #25
def test_send_after_stream_finish():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        b.start(stream)
        yield b.last_send

        b.send('hello')
        result = yield read(stream)
        assert result == ['hello']
Example #26
    def connect(self, host, port):
        self.host = host
        self.port = port

        client = TCPClient()
        try:
            self.stream = yield client.connect(self.host, self.port)
        except IOError as e:
            log.error("%s", repr(e))
            raise gen.Return((False, 'Failed to connect'))
        self.trigger(Event.CONNECT, self)
        raise gen.Return((True, "OK"))
Example #27
def test_close_closed():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        b.start(stream)

        b.send(123)
        stream.close()  # external closing

        yield b.close(ignore_closed=True)
Example #28
def handle_stream():
    while True:
        ip = '192.168.199.126'
        ip = '45.77.214.165'
        yield gen.sleep(3)
        streamCli = yield TCPClient().connect(ip, 8889)
        stream = yield TCPClient().connect('192.168.199.126', 5900)
        print('client ok')
        IOLoop.instance().add_callback(
            functools.partial(run_client, stream, streamCli))
        IOLoop.instance().add_callback(
            functools.partial(run_client, streamCli, stream))
Example #29
def test_send_after_stream_finish():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        b.start(stream)
        yield b.last_send

        b.send('hello')
        result = yield read(stream)
        assert result == ['hello']
Example #31
def test_send_before_start():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)

        b.send('hello')
        b.send('world')

        b.start(stream)
        result = yield read(stream)
        assert result == ['hello', 'world']
Example #32
def connect(ip, port, timeout=1):
    client = TCPClient()
    start = time()
    while True:
        try:
            stream = yield client.connect(ip, port)
            raise Return(stream)
        except StreamClosedError:
            if time() - start < timeout:
                yield gen.sleep(0.01)
                logger.debug("sleeping on connect")
            else:
                raise
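The retry loop above only makes sense inside another coroutine; a brief sketch of calling it (host, port, and payload are illustrative):

from tornado import gen

@gen.coroutine
def ping(ip='127.0.0.1', port=8080):
    # Retries for up to `timeout` seconds while the listener is not up yet.
    stream = yield connect(ip, port, timeout=5)
    yield stream.write(b'ping\n')
    reply = yield stream.read_until(b'\n')
    stream.close()
    raise gen.Return(reply)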
Example #33
File: network.py Project: xlybaby/VAR
 def __init__(self,
              p_mq,
              p_server_map,
              p_callback=None,
              max_buffer_size=None,
              timeout=None):
     self._host = p_server_map["host"]
     self._port = p_server_map["port"]
     self._max_buf_size = max_buffer_size
     self._timeout = timeout
     self._mq = p_mq
     self._handler = p_callback
     self._tcpclient = TCPClient()
Example #34
    def connect(self, address, deserialize=True):
        ip, port = parse_host_port(address)

        client = TCPClient()
        try:
            stream = yield client.connect(ip,
                                          port,
                                          max_buffer_size=MAX_BUFFER_SIZE)
        except StreamClosedError as e:
            # The socket connect() call failed
            convert_stream_closed_error(e)

        raise gen.Return(TCP(stream, 'tcp://' + address, deserialize))
Example #36
    def initialize(self,
                   io_loop,
                   max_clients=10,
                   hostname_mapping=None,
                   max_buffer_size=104857600,
                   resolver=None,
                   defaults=None,
                   max_header_size=None):
        """Creates a AsyncHTTPClient.

        # Only one AsyncHTTPClient instance exists per IOLoop
        Only a single AsyncHTTPClient instance exists per IOLoop
        in order to provide limitations on the number of pending connections.
        force_instance=True may be used to suppress this behavior.

        max_clients is the number of concurrent requests that can be
        in progress.  Note that these arguments are only used when the
        client is first created, and will be ignored when an existing
        client is reused.

        hostname_mapping is a dictionary mapping hostnames to IP addresses.
        It can be used to make local DNS changes when modifying system-wide
        settings like /etc/hosts is not possible or desirable (e.g. in
        unittests).

        max_buffer_size is the number of bytes that can be read by IOStream. It
        defaults to 100mb.
        """
        super(SimpleAsyncHTTPClient, self).initialize(io_loop,
                                                      defaults=defaults)
        self.max_clients = max_clients  # maximum number of concurrent requests
        self.queue = collections.deque()  # a small queue of pending requests
        self.active = {}
        self.waiting = {}
        self.max_buffer_size = max_buffer_size
        self.max_header_size = max_header_size

        # TCPClient could create a Resolver for us, but we have to do it
        # ourselves to support hostname_mapping.
        # This looks like DNS resolution configuration; ignore it for now
        if resolver:
            self.resolver = resolver
            self.own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self.own_resolver = True
        if hostname_mapping is not None:
            self.resolver = OverrideResolver(resolver=self.resolver,
                                             mapping=hostname_mapping)
        # Here the TCP client is created
        self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop)
Example #37
def start_app():
  tcpClient = TCPClient()
  try:
    stream = yield tcpClient.connect('127.0.0.1', 9999)
    print('Connection started')
    app = LaternController(LanternDriver())
    client = TLVClient(stream)
    executer = ThreadPoolExecutor(max_workers=5)
    while True:
      command = yield client.getCommand()
      executer.submit(app.handle, command)
  except Exception as e:
    print('Caught Error: %s' % e)
    IOLoop.instance().add_callback(IOLoop.instance().stop)
Example #38
 def __init__(self, host, port, user_connection_id, docker_id, io_loop):
     self.__host = host
     self.__port = port
     self.__user_connection_id = user_connection_id
     self.__docker_id = docker_id
     self._io_loop = io_loop
     self.client = TCPClient(io_loop=self._io_loop)
     self._stream = None
     self._requests = dict()
     self._requests_signals = {
         packet.InPacket.METHOD_SELECT_RESULT: Signal('data'),
         packet.InPacket.METHOD_GET_STATUS: Signal('data'),
         packet.InPacket.METHOD_CANCEL: Signal('data'),
     }
Example #39
    def __init__(self,
                 io_loop=None,
                 max_clients=10,
                 max_buffer_size=None,
                 max_response_size=None):
        self._io_loop = io_loop or IOLoop.instance()
        self.max_clients = max_clients
        self.max_buffer_size = max_buffer_size or 104857600  # 100M
        self.max_response_size = max_response_size or 10 * 1024 * 1024  # 10M

        self.queue = collections.deque()
        self.active = {}
        self.waiting = {}
        self._client_closed = False
        self.tcp_client = TCPClient(io_loop=self._io_loop)
Example #40
def test_BatchedStream_raises():
    port = 3435
    server = MyServer()
    server.listen(port)

    client = TCPClient()
    stream = yield client.connect('127.0.0.1', port)
    b = BatchedStream(stream, interval=20)

    stream.close()

    with pytest.raises(StreamClosedError):
        yield b.recv()

    with pytest.raises(StreamClosedError):
        yield b.send('123')
Example #41
def connect(ip, port, timeout=3):
    client = TCPClient()
    start = time()
    while True:
        try:
            future = client.connect(ip, port, max_buffer_size=MAX_BUFFER_SIZE)
            stream = yield gen.with_timeout(timedelta(seconds=timeout), future)
            raise Return(stream)
        except StreamClosedError:
            if time() - start < timeout:
                yield gen.sleep(0.01)
                logger.debug("sleeping on connect")
            else:
                raise
        except gen.TimeoutError:
            raise IOError("Timed out while connecting to %s:%d" % (ip, port))
Example #42
class GatewayConnector(Connector):
    _executor = ThreadPoolExecutor(2)
    _resolver = netutil.ExecutorResolver(close_executor=False,
                                         executor=_executor)
    client = TCPClient(resolver=_resolver)

    async def connect(self, address, deserialize=True, **connection_args):
        ip, port, path = parse_gateway_address(address)
        sni = "daskgateway-" + path
        ctx = connection_args.get("ssl_context")
        if not isinstance(ctx, ssl.SSLContext):
            raise TypeError("Gateway expects a `ssl_context` argument of type "
                            "ssl.SSLContext, instead got %s" % ctx)

        try:
            plain_stream = await self.client.connect(
                ip, port, max_buffer_size=MAX_BUFFER_SIZE)
            stream = await plain_stream.start_tls(False,
                                                  ssl_options=ctx,
                                                  server_hostname=sni)
            if stream.closed() and stream.error:
                raise StreamClosedError(stream.error)

        except StreamClosedError as e:
            # The socket connect() call failed
            convert_stream_closed_error(self, e)

        local_address = "tls://" + get_stream_address(stream)
        peer_address = "gateway://" + address
        return TLS(stream, local_address, peer_address, deserialize)
Example #43
    def __init__(self):
        logger.debug('Starting Envisalink Client')

        # Register events for alarmserver requests -> envisalink
        events.register('alarm_update', self.request_action)

        # Register events for envisalink proxy
        events.register('envisalink', self.envisalink_proxy)

        # Create TCP Client
        self.tcpclient = TCPClient()

        # Connection
        self._connection = None

        # Set our terminator to \r\n
        self._terminator = b"\r\n"

        # Reconnect delay
        self._retrydelay = 10

        # Connect to Envisalink
        self.do_connect()

        # Setup timer to refresh envisalink
        tornado.ioloop.PeriodicCallback(self.check_connection, 1000).start()

        # Last activity
        self._last_activity = time.time()
Example #44
    def __init__(self, io_loop, request, on_message_callback=None,
                 compression_options=None):
        self.compression_options = compression_options
        self.connect_future = TracebackFuture()
        self.protocol = None
        self.read_future = None
        self.read_queue = collections.deque()
        self.key = base64.b64encode(os.urandom(16))
        self._on_message_callback = on_message_callback
        self.close_code = self.close_reason = None

        scheme, sep, rest = request.url.partition(':')
        scheme = {'ws': 'http', 'wss': 'https'}[scheme]
        request.url = scheme + sep + rest
        request.headers.update({
            'Upgrade': 'websocket',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key': self.key,
            'Sec-WebSocket-Version': '13',
        })
        if self.compression_options is not None:
            # Always offer to let the server set our max_wbits (and even though
            # we don't offer it, we will accept a client_no_context_takeover
            # from the server).
            # TODO: set server parameters for deflate extension
            # if requested in self.compression_options.
            request.headers['Sec-WebSocket-Extensions'] = (
                'permessage-deflate; client_max_window_bits')

        self.tcp_client = TCPClient(io_loop=io_loop)
        super(WebSocketClientConnection, self).__init__(
            io_loop, None, request, lambda: None, self._on_http_response,
            104857600, self.tcp_client, 65536, 104857600)
Example #45
class BaseTCPConnector(Connector, RequireEncryptionMixin):
    _executor = ThreadPoolExecutor(2, thread_name_prefix="TCP-Executor")
    _resolver = netutil.ExecutorResolver(close_executor=False, executor=_executor)
    client = TCPClient(resolver=_resolver)

    async def connect(self, address, deserialize=True, **connection_args):
        self._check_encryption(address, connection_args)
        ip, port = parse_host_port(address)
        kwargs = self._get_connect_args(**connection_args)

        try:
            stream = await self.client.connect(
                ip, port, max_buffer_size=MAX_BUFFER_SIZE, **kwargs
            )

            # Under certain circumstances tornado will have a closed connnection with an error and not raise
            # a StreamClosedError.
            #
            # This occurs with tornado 5.x and openssl 1.1+
            if stream.closed() and stream.error:
                raise StreamClosedError(stream.error)

        except StreamClosedError as e:
            # The socket connect() call failed
            convert_stream_closed_error(self, e)

        local_address = self.prefix + get_stream_address(stream)
        return self.comm_class(
            stream, local_address, self.prefix + address, deserialize
        )
Example #46
    def __init__(self, io_loop, request, compression_options=None):
        self.compression_options = compression_options
        self.connect_future = TracebackFuture()
        self.read_future = None
        self.read_queue = collections.deque()
        self.key = base64.b64encode(os.urandom(16))

        scheme, sep, rest = request.url.partition(":")
        scheme = {"ws": "http", "wss": "https"}[scheme]
        request.url = scheme + sep + rest
        request.headers.update(
            {
                "Upgrade": "websocket",
                "Connection": "Upgrade",
                "Sec-WebSocket-Key": self.key,
                "Sec-WebSocket-Version": "13",
            }
        )
        if self.compression_options is not None:
            # Always offer to let the server set our max_wbits (and even though
            # we don't offer it, we will accept a client_no_context_takeover
            # from the server).
            # TODO: set server parameters for deflate extension
            # if requested in self.compression_options.
            request.headers["Sec-WebSocket-Extensions"] = "permessage-deflate; client_max_window_bits"

        self.tcp_client = TCPClient(io_loop=io_loop)
        super(WebSocketClientConnection, self).__init__(
            io_loop, None, request, lambda: None, self._on_http_response, 104857600, self.tcp_client, 65536
        )
Example #47
File: app.py Project: jefffm/swimpy
    def __init__(self, routes, node, pipe):
        """
        Application instantiates and registers handlers for each message type,
        and routes messages to the pre-instantiated instances of each message handler

        :param routes: list of tuples in the form of (<message type str>, <MessageHandler class>)
        :param node: Node instance of the local node
        :param pipe: Instance of multiprocessing.Pipe for communicating with the parent process
        """
        # We don't really have to worry about synchronization
        # so long as we're careful about explicit context switching
        self.nodes = {node.node_id: node}

        self.local_node = node
        self.handlers = {}

        self.tcpclient = TCPClient()

        self.gossip_inbox = Queue()
        self.gossip_outbox = Queue()

        self.sequence_number = 0

        if routes:
            self.add_handlers(routes)

        self.pipe = pipe
        self.ioloop = IOLoop.current()

        self.add_node_event = Event()
Example #48
    def initialize(self, io_loop=None, max_clients=10,
                   max_simultaneous_connections=None,
                   hostname_mapping=None, max_buffer_size=104857600,
                   resolver=None, defaults=None, max_header_size=None,
                   max_body_size=None):
        """Creates a AsyncHTTPClient.

        Only a single AsyncHTTPClient instance exists per IOLoop
        in order to provide limitations on the number of pending connections.
        ``force_instance=True`` may be used to suppress this behavior.

        Note that because of this implicit reuse, unless ``force_instance``
        is used, only the first call to the constructor actually uses
        its arguments. It is recommended to use the ``configure`` method
        instead of the constructor to ensure that arguments take effect.

        ``max_clients`` is the number of concurrent requests that can be
        in progress; when this limit is reached additional requests will be
        queued. Note that time spent waiting in this queue still counts
        against the ``request_timeout``.

        ``hostname_mapping`` is a dictionary mapping hostnames to IP addresses.
        It can be used to make local DNS changes when modifying system-wide
        settings like ``/etc/hosts`` is not possible or desirable (e.g. in
        unittests).

        ``max_buffer_size`` (default 100MB) is the number of bytes
        that can be read into memory at once. ``max_body_size``
        (defaults to ``max_buffer_size``) is the largest response body
        that the client will accept.  Without a
        ``streaming_callback``, the smaller of these two limits
        applies; with a ``streaming_callback`` only ``max_body_size``
        does.

        .. versionchanged:: 4.2
           Added the ``max_body_size`` argument.
        """
        super(SimpleAsyncHTTPClient, self).initialize(io_loop,
                                                      defaults=defaults)
        self.max_clients = max_clients
        self.queue = collections.deque()
        self.active = {}
        self.waiting = {}
        self.max_buffer_size = max_buffer_size
        self.max_header_size = max_header_size
        self.max_body_size = max_body_size
        # TCPClient could create a Resolver for us, but we have to do it
        # ourselves to support hostname_mapping.
        if resolver:
            self.resolver = resolver
            self.own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self.own_resolver = True
        if hostname_mapping is not None:
            self.resolver = OverrideResolver(resolver=self.resolver,
                                             mapping=hostname_mapping)
        self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop)
Example #49
def test_BatchedStream():
    port = 3434
    server = MyServer()
    server.listen(port)

    client = TCPClient()
    stream = yield client.connect('127.0.0.1', port)
    b = BatchedStream(stream, interval=20)

    b.send('hello')
    b.send('world')

    result = yield b.recv()
    assert result == 'hello'
    result = yield b.recv()
    assert result == 'hello'
    result = yield b.recv()
    assert result == 'world'
    result = yield b.recv()
    assert result == 'world'

    b.close()
Example #51
 def run_task(self, host):
     client = TCPClient()
     LOG.debug("connecting to `%s:%s'" % (host, self.port))
     try:
         stream = yield client.connect(host, port=self.port)
         LOG.debug("sending query `%s' to `%s:%s'" % (
             self.content.encode('string-escape'), host, self.port))
         yield stream.write(self.content)
         ret = yield stream.read_until_close()
         resp = str(ret).encode('string-escape')[:DEBUG_CONTENT_LENGTH]
         LOG.debug("`%s:%s' returns `%s'" % (host, self.port, resp))
         stream.close()
         del stream
         self.returns[host] = ret
     except:
         LOG.warn("`%s:%s' return status unknown" % (host, self.port))
         self.returns[host] = None
     finally:
         client = None
Example #52
def test_BatchedSend():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        b.start(stream)
        yield b.last_send

        yield gen.sleep(0.020)

        b.send('hello')
        b.send('hello')
        b.send('world')
        yield gen.sleep(0.020)
        b.send('HELLO')
        b.send('HELLO')

        result = yield read(stream)
        assert result == ['hello', 'hello', 'world']
        result = yield read(stream)
        assert result == ['HELLO', 'HELLO']
Example #53
    def __init__(self, io_loop=None, max_clients=10, max_buffer_size=None, max_response_size=None):
        self._io_loop = io_loop or IOLoop.instance()
        self.max_clients = max_clients
        self.max_buffer_size = max_buffer_size or 104857600  # 100M
        self.max_response_size = max_response_size or 10 * 1024 * 1024  # 10M

        self.queue = collections.deque()
        self.active = {}
        self.waiting = {}
        self._client_closed = False
        self.tcp_client = TCPClient(io_loop=self._io_loop)
Example #54
def test_send_before_close():
    with echo_server() as e:
        client = TCPClient()
        stream = yield client.connect('127.0.0.1', e.port)

        b = BatchedSend(interval=10)
        b.start(stream)

        cnt = int(e.count)
        b.send('hello')
        yield b.close()         # close immediately after sending
        assert not b.buffer

        start = time()
        while e.count != cnt + 1:
            yield gen.sleep(0.01)
            assert time() < start + 5

        with pytest.raises(StreamClosedError):
            b.send('123')
Example #55
class SafeTcpStream:
    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.stream = None
        self.tcp_client = TCPClient()
        self.make_tcp_connection_loop()

    @gen.coroutine
    def make_tcp_connection_loop(self, once=False):
        while True:
            if self.stream is None:
                try:
                    if once:
                        self.stream = yield gen.with_timeout(
                            ioloop.IOLoop.instance().time() + ONCE_CONNECT_TIMEOUT,
                            self.tcp_client.connect(self.host, self.port))
                    else:
                        self.stream = yield self.tcp_client.connect(self.host, self.port)

                    if self.stream is not None:
                        self.stream.set_close_callback(self.disconnected)
                except Exception:
                    pass
            if once:
                break
            yield gen.Task(ioloop.IOLoop.instance().add_timeout, ioloop.IOLoop.instance().time() + RECONNECT_INTERVAL)

    def disconnected(self):
        try:
            self.stream.close()
        except Exception:
            pass
        self.stream = None

    def write(self, data):
        if self.stream is not None:
            return self.stream.write(data)
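A small sketch of using the wrapper above: writes silently drop while the connection is down, and the background loop keeps retrying every RECONNECT_INTERVAL seconds (the endpoint, payload, and interval are illustrative):

from tornado import ioloop

stream = SafeTcpStream('127.0.0.1', 4000)

def heartbeat():
    # Returns a write future when connected, None while the link is down.
    stream.write(b'ping\n')

ioloop.PeriodicCallback(heartbeat, 5000).start()
ioloop.IOLoop.instance().start()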
Example #56
 def __init__(self, host, port, user_connection_id, docker_id, io_loop):
     self.__host = host
     self.__port = port
     self.__user_connection_id = user_connection_id
     self.__docker_id = docker_id
     self._io_loop = io_loop
     self.client = TCPClient(io_loop=self._io_loop)
     self._stream = None
     self._requests = dict()
     self._requests_signals = {
         packet.InPacket.METHOD_SELECT_RESULT: Signal("data"),
         packet.InPacket.METHOD_GET_STATUS: Signal("data"),
         packet.InPacket.METHOD_CANCEL: Signal("data"),
     }
Example #57
    def initialize(self, io_loop, max_clients=10,
                   hostname_mapping=None, max_buffer_size=104857600,
                   resolver=None, defaults=None, max_header_size=None):
        """Creates a AsyncHTTPClient.

        # Only one AsyncHTTPClient instance exists per IOLoop
        Only a single AsyncHTTPClient instance exists per IOLoop
        in order to provide limitations on the number of pending connections.
        force_instance=True may be used to suppress this behavior.

        max_clients is the number of concurrent requests that can be
        in progress.  Note that these arguments are only used when the
        client is first created, and will be ignored when an existing
        client is reused.

        hostname_mapping is a dictionary mapping hostnames to IP addresses.
        It can be used to make local DNS changes when modifying system-wide
        settings like /etc/hosts is not possible or desirable (e.g. in
        unittests).

        max_buffer_size is the number of bytes that can be read by IOStream. It
        defaults to 100mb.
        """
        super(SimpleAsyncHTTPClient, self).initialize(io_loop,
                                                      defaults=defaults)
        self.max_clients = max_clients  # maximum number of concurrent requests
        self.queue = collections.deque()  # a small queue of pending requests
        self.active = {}
        self.waiting = {}
        self.max_buffer_size = max_buffer_size
        self.max_header_size = max_header_size

        # TCPClient could create a Resolver for us, but we have to do it
        # ourselves to support hostname_mapping.
        # This looks like DNS resolution configuration; ignore it for now
        if resolver:
            self.resolver = resolver
            self.own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self.own_resolver = True
        if hostname_mapping is not None:
            self.resolver = OverrideResolver(resolver=self.resolver,
                                             mapping=hostname_mapping)
        # Here the TCP client is created
        self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop)
Example #58
    def __init__(self, io_loop, request):
        self.connect_future = TracebackFuture()
        self.read_future = None
        self.read_queue = collections.deque()
        self.key = base64.b64encode(os.urandom(16))

        scheme, sep, rest = request.url.partition(':')
        scheme = {'ws': 'http', 'wss': 'https'}[scheme]
        request.url = scheme + sep + rest
        request.headers.update({
            'Upgrade': 'websocket',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key': self.key,
            'Sec-WebSocket-Version': '13',
        })

        self.tcp_client = TCPClient(io_loop=io_loop)
        super(WebSocketClientConnection, self).__init__(
            io_loop, None, request, lambda: None, self._on_http_response,
            104857600, self.tcp_client, 65536)