Example #1
 def __init__(self,
              host='localhost',
              port=6379,
              db=0,
              password=None,
              socket_timeout=None,
              socket_connect_timeout=None,
              retry_on_timeout=False,
              encoding="utf-8",
              encoding_errors='strict',
              decode_responses=False,
              parser_class=PythonParser):
     self.pid = os.getpid()
     self.host = host
     self.port = int(port)
     self.db = db
     self.password = password
     self._timeout = None
     self.io_loop = IOLoop.current()
     self.socket_timeout = socket_timeout
     self.socket_connect_timeout = socket_connect_timeout or socket_timeout
     self.retry_on_timeout = retry_on_timeout
     self.encoder = Encoder(encoding, encoding_errors, decode_responses)
     self._stream = None
     self._parser = parser_class()
     self.resolver = Resolver()
     self._description_args = {
         'host': self.host,
         'port': self.port,
         'db': self.db,
     }
     self._connect_callbacks = []
Example #2
 def __init__(self, sock):
     self._iostream = IOStream(sock)
     self._resolver = Resolver()
     self._readtimeout = 0
     self._connecttimeout = 0
     self._rbuffer = StringIO(b'')
     self._rbuffer_size = 0
Example #3
 def setUp(self):
     super(TrickleTCPTest, self).setUp()
     sock, port = bind_unused_port()
     self.port = port
     self.server = TestTCPServer(self.io_loop)
     self.server.add_socket(sock)
     self.resolver = Resolver()
Example #4
 def __init__(self, resolver: Optional[Resolver] = None) -> None:
     if resolver is not None:
         self.resolver = resolver
         self._own_resolver = False
     else:
         self.resolver = Resolver()
         self._own_resolver = True
Example #5
 def __init__(self, resolver=None):
     if resolver is not None:
         self.resolver = resolver
         self._own_resolver = False
     else:
         self.resolver = Resolver()
         self._own_resolver = True
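
A minimal sketch of the resolver-ownership convention shown in the two examples above: an injected resolver is borrowed, while a default-constructed one is owned and must be closed by its creator. ExampleClient is a hypothetical name, not part of Tornado.

from tornado.netutil import Resolver

class ExampleClient(object):
    def __init__(self, resolver=None):
        if resolver is not None:
            self.resolver = resolver       # borrowed: the caller keeps ownership
            self._own_resolver = False
        else:
            self.resolver = Resolver()     # created here, so this object owns it
            self._own_resolver = True

    def close(self):
        # Close only resolvers we created; a shared resolver stays usable.
        if self._own_resolver:
            self.resolver.close()

shared = Resolver()
a = ExampleClient(resolver=shared)   # does not own `shared`
b = ExampleClient()                  # owns a private resolver
a.close()                            # leaves `shared` open
b.close()                            # closes b's own resolver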
Example #6
def tornado_init():
    """tornado init configure"""
    asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())

    if platform.system() != "Windows":
        Resolver.configure('tornado.platform.caresresolver.CaresResolver')
        AsyncHTTPClient.configure(
            'tornado.curl_httpclient.CurlAsyncHTTPClient')
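
The example above selects the pycares-based resolver process-wide. A hedged sketch of the same call with a fallback, assuming pycares may be missing; the fallback to ThreadedResolver is an illustration only, not part of the original snippet. Resolver.configure() must run before the first Resolver instance is created.

from tornado.netutil import Resolver

try:
    # Requires the pycares package; Tornado imports the class eagerly here.
    Resolver.configure('tornado.platform.caresresolver.CaresResolver')
except ImportError:
    # Fallback chosen for this sketch only.
    Resolver.configure('tornado.netutil.ThreadedResolver', num_threads=4)

print(Resolver.configured_class())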
Example #7
 def __init__(self, resolver=None, io_loop=None):
     self.io_loop = io_loop or IOLoop.current()
     if resolver is not None:
         self.resolver = resolver
         self._own_resolver = False
     else:
         self.resolver = Resolver(io_loop=io_loop)
         self._own_resolver = True
Example #8
class TCPClient(object):
    """A non-blocking TCP connection factory.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    def __init__(self, resolver=None, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        if resolver is not None:
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self._own_resolver = True

    def close(self):
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self,
                host,
                port,
                af=socket.AF_UNSPEC,
                ssl_options=None,
                max_buffer_size=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).
        """
        addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo, self.io_loop,
            functools.partial(self._create_stream, max_buffer_size))
        af, addr, stream = yield connector.start()
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            stream = yield stream.start_tls(False,
                                            ssl_options=ssl_options,
                                            server_hostname=host)
        raise gen.Return(stream)

    def _create_stream(self, max_buffer_size, af, addr):
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        try:
            stream = IOStream(socket.socket(af),
                              io_loop=self.io_loop,
                              max_buffer_size=max_buffer_size)
        except socket.error as e:
            fu = Future()
            fu.set_exception(e)
            return fu
        else:
            return stream.connect(addr)
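
A hedged usage sketch for the coroutine-style connect() shown above (Tornado 4.x-era API); the host, port, and request bytes are placeholders.

import socket
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.tcpclient import TCPClient

@gen.coroutine
def fetch_status_line(host='example.com', port=80):
    client = TCPClient()
    try:
        stream = yield client.connect(host, port, af=socket.AF_UNSPEC)
        yield stream.write(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
        status = yield stream.read_until(b'\r\n')
        stream.close()
        raise gen.Return(status)
    finally:
        client.close()

if __name__ == '__main__':
    print(IOLoop.current().run_sync(fetch_status_line))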
Example #9
def main():

    # using CaresResolver as DNS resolver
    # see also: http://www.tornadoweb.org/en/branch3.0/caresresolver.html
    Resolver.configure('tornado.platform.caresresolver.CaresResolver')
    # CurlAsyncHTTPClient to be used as httpclient subclass
    tornado.httpclient.AsyncHTTPClient.configure(
        "tornado.curl_httpclient.CurlAsyncHTTPClient")

    define("port", default=8080, help="run on the given port", type=int)
    # define("address", default=get_listening_address(), help="run on the given address", type=str)
    define("daemon", default=settings.daemon, help="run as daemon", type=bool)
    define("webgate",
           default=settings.webgate,
           help="run on web gate mode",
           type=bool)
    define("log_to_file", default=False, help="log to file", type=bool)
    define("game_host",
           default=settings.game_servers['development']['host'],
           help="bind address",
           type=str)
    define("game_port",
           default=settings.game_servers['development']['port'],
           help="run on the given port",
           type=int)
    define("mode",
           default="development",
           help="default run in development mode",
           type=str)
    if '--daemon' not in sys.argv:
        parse_command_line(sys.argv + ['--log_to_stderr'])
    else:
        parse_command_line(final=False)

    game_server = settings.game_servers[options.mode]
    assert (game_server)
    if options.daemon:
        from lockfile.pidlockfile import PIDLockFile
        import daemon
        main_log = open(settings.main_log_file, "a+")
        pid_file = os.path.join(settings.ROOT, "pids",
                                "%s-%s.pid" % (settings.APPNAME, options.port))
        if not daemon_running(pid_file):
            ctx = daemon.DaemonContext(
                stdout=main_log,
                stderr=main_log,
                pidfile=PIDLockFile(pid_file, threaded=False),
                working_directory=settings.ROOT,
            )
            ctx.open()
            settings.daemon = options.daemon
            options.log_to_file = True
            options.log_file_prefix = settings.tornado_log_prefix % options.port
            parse_command_line(['--log_file_prefix', options.log_file_prefix])

    start()
Example #10
    def _resolve(self, ioloop):
        """ Resolve host addr (domain)

        Args:
            ioloop (IOLoop): io_loop to use
        Returns:
            Tuple of address family and ip address
        """
        resolver = Resolver(io_loop=ioloop)
        addrinfo = yield resolver.resolve(self.addr, int(self.port), socket.AF_UNSPEC)
        raise gen.Return(addrinfo[0])
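
The _resolve generator above only yields Futures, so it has to be driven as a coroutine; the decorator is presumably applied outside the snippet. A minimal sketch of the same logic under Tornado 5+ conventions (no io_loop argument); HostEndpoint is a hypothetical holder class.

import socket
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.netutil import Resolver

class HostEndpoint(object):
    def __init__(self, addr, port):
        self.addr = addr
        self.port = port

    @gen.coroutine
    def _resolve(self):
        resolver = Resolver()
        addrinfo = yield resolver.resolve(self.addr, int(self.port),
                                          socket.AF_UNSPEC)
        raise gen.Return(addrinfo[0])   # first (family, sockaddr) pair

print(IOLoop.current().run_sync(HostEndpoint('localhost', 80)._resolve))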
Example #11
class TCPClient(object):
    """A non-blocking TCP connection factory.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    def __init__(self, resolver=None, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        if resolver is not None:
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self._own_resolver = True

    def close(self):
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
                max_buffer_size=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).
        """
        addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo, self.io_loop,
            functools.partial(self._create_stream, max_buffer_size))
        af, addr, stream = yield connector.start()
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            stream = yield stream.start_tls(False, ssl_options=ssl_options,
                                            server_hostname=host)
        raise gen.Return(stream)

    def _create_stream(self, max_buffer_size, af, addr):
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        try:
            stream = IOStream(socket.socket(af),
                            io_loop=self.io_loop,
                            max_buffer_size=max_buffer_size)
        except socket.error as e:
            fu = Future()
            fu.set_exception(e)
            return fu
        else:
            return stream.connect(addr)
Example #12
    def _resolve(self, ioloop):
        """ Resolve host addr (domain)

        Args:
            ioloop (IOLoop): io_loop to use
        Returns:
            Tuple of address family and ip address
        """
        resolver = Resolver(io_loop=ioloop)
        addrinfo = yield resolver.resolve(self.addr, int(self.port),
                                          socket.AF_UNSPEC)
        raise gen.Return(addrinfo[0])
Example #13
class TCPClient(object):
    """A non-blocking TCP connection factory.
    """
    def __init__(self, resolver=None, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        if resolver is not None:
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self._own_resolver = True

    def close(self):
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self,
                host,
                port,
                af=socket.AF_UNSPEC,
                ssl_options=None,
                max_buffer_size=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).
        """
        addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo, self.io_loop,
            functools.partial(self._create_stream, host, ssl_options,
                              max_buffer_size))
        af, addr, stream = yield connector.start()
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        raise gen.Return(stream)

    def _create_stream(self, host, ssl_options, max_buffer_size, af, addr):
        # TODO: we should connect in plaintext mode and start the
        # ssl handshake only after stopping the _Connector.
        if ssl_options is None:
            stream = IOStream(socket.socket(af),
                              io_loop=self.io_loop,
                              max_buffer_size=max_buffer_size)
        else:
            stream = SSLIOStream(socket.socket(af),
                                 io_loop=self.io_loop,
                                 ssl_options=ssl_options,
                                 max_buffer_size=max_buffer_size)
        return stream.connect(addr, server_hostname=host)
Example #14
class TCPClient(object):
    """A non-blocking TCP connection factory.
    """
    def __init__(self, resolver=None, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        if resolver is not None:
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self._own_resolver = True

    def close(self):
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
                max_buffer_size=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).
        """
        addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo, self.io_loop,
            # This involves calls across three functions; it is painful to trace.
            functools.partial(self._create_stream,
                              host, ssl_options, max_buffer_size))
        af, addr, stream = yield connector.start()
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        raise gen.Return(stream)  # the gen machinery catches this exception here

    def _create_stream(self, host, ssl_options, max_buffer_size, af, addr):
        # TODO: we should connect in plaintext mode and start the
        # ssl handshake only after stopping the _Connector.
        if ssl_options is None:
            stream = IOStream(socket.socket(af),
                              io_loop=self.io_loop,
                              max_buffer_size=max_buffer_size)
        else:
            stream = SSLIOStream(socket.socket(af),
                                 io_loop=self.io_loop,
                                 ssl_options=ssl_options,
                                 max_buffer_size=max_buffer_size)
        return stream.connect(addr, server_hostname=host)
Example #15
 def __init__(self, host, port, io_loop, key):
     self.io_loop = io_loop
     self.resolver = Resolver()
     self._callbacks = {}
     self._connected = False
     self.queue = deque()
     self.key = key
     self.stream = None
     self.pepv_act_resp = None
     self.prof = {}
     with stack_context.ExceptionStackContext(self._handle_exception):
         self.resolver.resolve(host,
                               port,
                               socket.AF_INET,
                               callback=self._on_resolve)
Example #16
 def __init__(self, host, port, io_loop):
     self.io_loop = io_loop
     self.resolver = Resolver()
     self.stream = None
     self.queue = deque()
     self._callbacks = {}
     self._connected = False
     self.read_state = self.READ_HEAD
     self.prev_response = None
     self.prof = {}
     with stack_context.ExceptionStackContext(self._handle_exception):
         self.resolver.resolve(host,
                               port,
                               socket.AF_INET,
                               callback=self._on_resolve)
Example #17
 def __init__(self, resolver=None):
     if resolver is not None:
         self.resolver = resolver
         self._own_resolver = False
     else:
         self.resolver = Resolver()
         self._own_resolver = True
Example #18
    def initialize(self, io_loop, max_clients=10,
                   hostname_mapping=None, max_buffer_size=104857600,
                   resolver=None, defaults=None):
        """Creates a AsyncHTTPClient.

        Only a single AsyncHTTPClient instance exists per IOLoop
        in order to provide limitations on the number of pending connections.
        force_instance=True may be used to suppress this behavior.

        max_clients is the number of concurrent requests that can be
        in progress.  Note that these arguments are only used when the
        client is first created, and will be ignored when an existing
        client is reused.

        hostname_mapping is a dictionary mapping hostnames to IP addresses.
        It can be used to make local DNS changes when modifying system-wide
        settings like /etc/hosts is not possible or desirable (e.g. in
        unittests).

        max_buffer_size is the number of bytes that can be read by IOStream. It
        defaults to 100mb.
        """
        super(SimpleAsyncHTTPClient, self).initialize(io_loop,
                                                      defaults=defaults)
        self.max_clients = max_clients
        self.queue = collections.deque()
        self.active = {}
        self.max_buffer_size = max_buffer_size
        self.resolver = resolver or Resolver(io_loop=io_loop)
        if hostname_mapping is not None:
            self.resolver = OverrideResolver(resolver=self.resolver,
                                             mapping=hostname_mapping)
Example #19
 def skipIfLocalhostV4(self):
     # The port used here doesn't matter, but some systems require it
     # to be non-zero if we do not also pass AI_PASSIVE.
     addrinfo = self.io_loop.run_sync(lambda: Resolver().resolve("localhost", 80))
     families = set(addr[0] for addr in addrinfo)
     if socket.AF_INET6 not in families:
         self.skipTest("localhost does not resolve to ipv6")
Example #20
 def do_register():
     resolver = UnixResolver(socket_file=args.control_unix_socket,
                             resolver=Resolver())
     AsyncHTTPClient.configure(None, resolver=resolver)
     client = AsyncHTTPClient()
     mtype = 'application/json'
     headers = {'Content-Type': mtype}
     body = json.dumps({
         "name": args.name,
         "type": args.type,
         "description": args.description
     })
     try:
         response = yield client.fetch('http://unixsocket/ports/{}'.format(
             args.port),
                                       method='PUT',
                                       headers=headers,
                                       body=body)
     except HTTPError as he:
         print("Could not register port: {}".format(he), file=sys.stderr)
         sys.exit(1)
     except Exception as e:
         logging.exception("Could not register port")
         sys.exit(1)
     print(response.body)
Example #21
def main():
    args = parse_command_line()

    if not args:
        args = ['localhost', 'www.google.com',
                'www.facebook.com', 'www.dropbox.com']

    resolvers = [Resolver(), ThreadedResolver()]

    if twisted is not None:
        from tornado.platform.twisted import TwistedResolver
        resolvers.append(TwistedResolver())

    if pycares is not None:
        from tornado.platform.caresresolver import CaresResolver
        resolvers.append(CaresResolver())

    family = {
        'unspec': socket.AF_UNSPEC,
        'inet': socket.AF_INET,
        'inet6': socket.AF_INET6,
    }[options.family]

    for host in args:
        print('Resolving %s' % host)
        for resolver in resolvers:
            addrinfo = yield resolver.resolve(host, 80, family)
            print('%s: %s' % (resolver.__class__.__name__,
                              pprint.pformat(addrinfo)))
        print()
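
The resolver-comparison main() above is itself a generator, so it has to be wrapped as a coroutine and driven by the IOLoop; the snippet does not show how it is launched. A minimal sketch of the same idea using only the built-in resolvers, assuming a Tornado version that still provides ThreadedResolver (it is deprecated in newer releases).

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.netutil import Resolver, ThreadedResolver

@gen.coroutine
def compare_resolvers(hosts=('localhost',)):
    for resolver in (Resolver(), ThreadedResolver()):
        for host in hosts:
            addrinfo = yield resolver.resolve(host, 80)
            print(resolver.__class__.__name__, host, addrinfo[0])

IOLoop.current().run_sync(compare_resolvers)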
Example #22
class TCPClientTest(AsyncTestCase):
    def setUp(self):
        super(TCPClientTest, self).setUp()
        self.server = None
        self.client = TCPClient()

    def start_server(self, family):
        if family == socket.AF_UNSPEC and "TRAVIS" in os.environ:
            self.skipTest("dual-stack servers often have port conflicts on travis")
        self.server = TestTCPServer(family)
        return self.server.port

    def stop_server(self):
        if self.server is not None:
            self.server.stop()
            self.server = None

    def tearDown(self):
        self.client.close()
        self.stop_server()
        super(TCPClientTest, self).tearDown()

    def skipIfLocalhostV4(self):
        # The port used here doesn't matter, but some systems require it
        # to be non-zero if we do not also pass AI_PASSIVE.
        addrinfo = self.io_loop.run_sync(lambda: Resolver().resolve("localhost", 80))
        families = set(addr[0] for addr in addrinfo)
        if socket.AF_INET6 not in families:
            self.skipTest("localhost does not resolve to ipv6")
Example #23
 def __init__(self, sock):
     self._iostream = IOStream(sock)
     self._resolver = Resolver()
     self._readtimeout = 0
     self._connecttimeout = 0
     self._rbuffer = StringIO(b'')
     self._rbuffer_size = 0
Example #24
    def initialize(self,
                   max_clients=10,
                   hostname_mapping=None,
                   max_buffer_size=104857600,
                   resolver=None,
                   defaults=None,
                   max_header_size=None,
                   max_body_size=None):
        """Creates a AsyncHTTPClient.

        Only a single AsyncHTTPClient instance exists per IOLoop
        in order to provide limitations on the number of pending connections.
        ``force_instance=True`` may be used to suppress this behavior.

        Note that because of this implicit reuse, unless ``force_instance``
        is used, only the first call to the constructor actually uses
        its arguments. It is recommended to use the ``configure`` method
        instead of the constructor to ensure that arguments take effect.

        ``max_clients`` is the number of concurrent requests that can be
        in progress; when this limit is reached additional requests will be
        queued. Note that time spent waiting in this queue still counts
        against the ``request_timeout``.

        ``hostname_mapping`` is a dictionary mapping hostnames to IP addresses.
        It can be used to make local DNS changes when modifying system-wide
        settings like ``/etc/hosts`` is not possible or desirable (e.g. in
        unittests).

        ``max_buffer_size`` (default 100MB) is the number of bytes
        that can be read into memory at once. ``max_body_size``
        (defaults to ``max_buffer_size``) is the largest response body
        that the client will accept.  Without a
        ``streaming_callback``, the smaller of these two limits
        applies; with a ``streaming_callback`` only ``max_body_size``
        does.

        .. versionchanged:: 4.2
           Added the ``max_body_size`` argument.
        """
        super(SimpleAsyncHTTPClient, self).initialize(defaults=defaults)
        self.max_clients = max_clients
        self.queue = collections.deque()
        self.active = {}
        self.waiting = {}
        self.max_buffer_size = max_buffer_size
        self.max_header_size = max_header_size
        self.max_body_size = max_body_size
        # TCPClient could create a Resolver for us, but we have to do it
        # ourselves to support hostname_mapping.
        if resolver:
            self.resolver = resolver
            self.own_resolver = False
        else:
            self.resolver = Resolver()
            self.own_resolver = True
        if hostname_mapping is not None:
            self.resolver = OverrideResolver(resolver=self.resolver,
                                             mapping=hostname_mapping)
        self.tcp_client = TCPClient(resolver=self.resolver)
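
A hedged sketch of the hostname_mapping feature documented above: route a test hostname to 127.0.0.1 without editing /etc/hosts. The hostname and port are placeholders, and the fetch is expected to fail unless something is actually listening there.

from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop

AsyncHTTPClient.configure(
    'tornado.simple_httpclient.SimpleAsyncHTTPClient',
    hostname_mapping={'service.test': '127.0.0.1'})

async def probe():
    client = AsyncHTTPClient()
    try:
        response = await client.fetch('http://service.test:8888/')
        return response.code
    except Exception as exc:        # no local server is fine for this sketch
        return exc

print(IOLoop.current().run_sync(probe))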
Example #25
class TrickleHTTPTest(AsyncHTTPTestCase):
    # Tests with an HTTPServer.

    def setUp(self):
        super(TrickleHTTPTest, self).setUp()
        self.resolver = Resolver()

    def get_app(self):
        return Application([('/', TrickleTestHandler)])

    @gen_test
    def test_http(self):
        addr_info = yield self.resolver.resolve(
            'localhost',
            self.get_http_port(),
            socket.AF_INET)

        sock_addr = addr_info[0][1]
        trick = Trickle(
            socket.socket(socket.AF_INET),
            io_loop=self.io_loop)

        yield trick.connect(sock_addr)
        yield trick.write(b'GET / HTTP/1.1\r\n\r\n')

        headers = yield trick.read_until(b'\r\n\r\n')
        match = re.search(br'Content-Length: (\d+)\r\n', headers)
        content_length = int(match.group(1))
        body = yield trick.read_bytes(content_length)
        self.assertEqual(b'hello', body)
Example #26
class DockerAuthenticator(LocalAuthenticator):
    """A version that performs local system user creation from within a
    docker container.
    """

    resolver = UnixResolver(resolver=Resolver(), socket_path='/restuser.sock')
    AsyncHTTPClient.configure(None, resolver=resolver)
    client = AsyncHTTPClient()

    def system_user_exists(self, user):
        # user_id is stored in state after looking it up
        return user.state and 'user_id' in user.state

    @gen.coroutine
    def add_system_user(self, user):
        """Add a new user.
        This adds the user to the whitelist, and creates a system user by
        accessing a simple REST api.
        """
        try:
            resp = yield self.client.fetch('http://unix+restuser/' + user.name,
                                           method='POST',
                                           body='{}')
        except HTTPError as e:
            self.log.error("Failed to create %r", user.name, exc_info=True)
            raise

        # todo: save the user id into the whitelist or somewhere
        info = json.loads(resp.body.decode('utf8', 'replace'))
        self.log.info("Created user %s with uid %i", user.name, info['uid'])
        if user.state is None:
            user.state = {}
        user.state['user_id'] = info['uid']
        self.db.commit()
Example #27
 def skipIfLocalhostV4(self):
     # The port used here doesn't matter, but some systems require it
     # to be non-zero if we do not also pass AI_PASSIVE.
     Resolver().resolve('localhost', 80, callback=self.stop)
     addrinfo = self.wait()
     families = set(addr[0] for addr in addrinfo)
     if socket.AF_INET6 not in families:
         self.skipTest("localhost does not resolve to ipv6")
Example #28
 def __init__(self, resolver=None, io_loop=None):
     self.io_loop = io_loop or IOLoop.current()
     if resolver is not None:
         self.resolver = resolver
         self._own_resolver = False
     else:
         self.resolver = Resolver(io_loop=io_loop)
         self._own_resolver = True
Example #29
def main():
    app = Application([('/', HelloHandler)])
    server = HTTPServer(app)
    server.add_socket(bind_unix_socket(SOCKPATH))

    resolver = UnixResolver(resolver=Resolver())
    AsyncHTTPClient.configure(None, resolver=resolver)

    response = yield AsyncHTTPClient().fetch('http://unixsocket/')
    print response.body
Example #30
    def __init__(self, io_loop, request):
        self.connect_future = TracebackFuture()
        self.read_future = None
        self.read_queue = collections.deque()
        self.key = base64.b64encode(os.urandom(16))

        scheme, sep, rest = request.url.partition(':')
        scheme = {'ws': 'http', 'wss': 'https'}[scheme]
        request.url = scheme + sep + rest
        request.headers.update({
            'Upgrade': 'websocket',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key': self.key,
            'Sec-WebSocket-Version': '13',
        })

        self.resolver = Resolver(io_loop=io_loop)
        super(WebSocketClientConnection,
              self).__init__(io_loop, None, request, lambda: None,
                             self._on_http_response, 104857600, self.resolver)
Example #31
    def __init__(self,
                 io_loop,
                 hostname_mapping=None,
                 max_buffer_size=104857600,
                 resolver=None,
                 defaults=None,
                 idle_timeout=30.0):
        """Creates a AsyncHTTPClient.

        Only a single AsyncHTTPClient instance exists per IOLoop
        in order to provide limitations on the number of pending connections.
        force_instance=True may be used to suppress this behavior.

        max_clients is the number of concurrent requests that can be
        in progress.  Note that these arguments are only used when the
        client is first created, and will be ignored when an existing
        client is reused.

        hostname_mapping is a dictionary mapping hostnames to IP addresses.
        It can be used to make local DNS changes when modifying system-wide
        settings like /etc/hosts is not possible or desirable (e.g. in
        unittests).

        max_buffer_size is the number of bytes that can be read by IOStream. It
        defaults to 100mb.
        """
        self.io_loop = io_loop
        self.queue = collections.deque()
        self.active = {}
        self.max_buffer_size = max_buffer_size
        if resolver:
            self.resolver = resolver
            self.own_resolver = False
        else:
            self.resolver = Resolver(io_loop=self.io_loop)
            self.own_resolver = True
        if hostname_mapping is not None:
            self.resolver = OverrideResolver(resolver=self.resolver,
                                             mapping=hostname_mapping)

        self.defaults = dict(HTTPRequest._DEFAULTS)
        if defaults is not None:
            self.defaults.update(defaults)

        self.connection = KeepAliveHTTPConnection(self.io_loop, self,
                                                  self.max_buffer_size,
                                                  self.resolver)

        self.idle_timeout = idle_timeout
        self._idle_timeout_callback = None
        self.logger = logging.getLogger(self.__class__.__name__)
Example #32
    def initialize(self,
                   io_loop,
                   max_clients=10,
                   hostname_mapping=None,
                   max_buffer_size=104857600,
                   resolver=None,
                   defaults=None,
                   max_header_size=None):
        """Creates a AsyncHTTPClient.

        # 一个IOLoop实例只有一个AsyncHTTPClient实例
        Only a single AsyncHTTPClient instance exists per IOLoop
        in order to provide limitations on the number of pending connections.
        force_instance=True may be used to suppress this behavior.

        max_clients is the number of concurrent requests that can be
        in progress.  Note that this arguments are only used when the
        client is first created, and will be ignored when an existing
        client is reused.

        hostname_mapping is a dictionary mapping hostnames to IP addresses.
        It can be used to make local DNS changes when modifying system-wide
        settings like /etc/hosts is not possible or desirable (e.g. in
        unittests).

        max_buffer_size is the number of bytes that can be read by IOStream. It
        defaults to 100mb.
        """
        super(SimpleAsyncHTTPClient, self).initialize(io_loop,
                                                      defaults=defaults)
        self.max_clients = max_clients  # maximum number of concurrent requests
        self.queue = collections.deque()  # a small pending-request queue
        self.active = {}
        self.waiting = {}
        self.max_buffer_size = max_buffer_size
        self.max_header_size = max_header_size

        # TCPClient could create a Resolver for us, but we have to do it
        # ourselves to support hostname_mapping.
        # This is presumably DNS-resolution configuration; ignore it for now
        if resolver:
            self.resolver = resolver
            self.own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self.own_resolver = True
        if hostname_mapping is not None:
            self.resolver = OverrideResolver(resolver=self.resolver,
                                             mapping=hostname_mapping)
        # and here a TCP client is created
        self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop)
Example #33
 def raw_fetch(self, headers, body):
     client = SimpleAsyncHTTPClient(self.io_loop)
     conn = RawRequestHTTPConnection(
         self.io_loop, client,
         httpclient._RequestProxy(httpclient.HTTPRequest(self.get_url("/")),
                                  dict(httpclient.HTTPRequest._DEFAULTS)),
         None, self.stop, 1024 * 1024, Resolver(io_loop=self.io_loop))
     conn.set_request(b"\r\n".join(
         headers + [utf8("Content-Length: %d\r\n" % len(body))]) + b"\r\n" +
                      body)
     response = self.wait()
     client.close()
     response.rethrow()
     return response
Example #34
def _request_for_tornado_client(urlstring,
                                method="GET",
                                body=None,
                                headers=None):
    """A utility that provides a context that handles
    HTTP, HTTPS, and HTTP+UNIX requests.
    Creates a tornado HTTPRequest object with a URL
    that tornado's HTTPClients can accept.
    If the request is made to a unix socket, temporarily
    configure the AsyncHTTPClient to resolve the URL
    and connect to the proper socket.
    """
    parts = urlsplit(urlstring)
    if parts.scheme in ["http", "https"]:
        pass
    elif parts.scheme == "http+unix":
        # If unix socket, mimic HTTP.
        parts = SplitResult(
            scheme="http",
            netloc=parts.netloc,
            path=parts.path,
            query=parts.query,
            fragment=parts.fragment,
        )

        class UnixSocketResolver(Resolver):
            """A resolver that routes HTTP requests to unix sockets
            in tornado HTTP clients.
            Due to constraints in Tornado's API, the scheme of the request
            must be `http` (not `http+unix`). Applications should replace
            the scheme in URLs before making a request to the HTTP client.
            """
            def initialize(self, resolver):
                self.resolver = resolver

            def close(self):
                self.resolver.close()

            async def resolve(self, host, port, *args, **kwargs):
                return [(socket.AF_UNIX, urldecode_unix_socket_path(host))]

        resolver = UnixSocketResolver(resolver=Resolver())
        AsyncHTTPClient.configure(None, resolver=resolver)
    else:
        raise Exception("Unknown URL scheme.")

    # Yield the request for the given client.
    url = urlunsplit(parts)
    request = HTTPRequest(url, method=method, body=body, headers=headers)
    yield request
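
A hedged sketch of consuming the generator above: build a request against a Unix-socket URL and fetch it. The socket path is a placeholder, the percent-encoded-netloc convention follows the snippet's urldecode_unix_socket_path helper, and a server must already be listening on that socket.

from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop

def fetch_over_unix_socket(url='http+unix://%2Ftmp%2Fdemo.sock/api/status'):
    # The generator yields exactly one prepared HTTPRequest.
    request = next(_request_for_tornado_client(url))
    return IOLoop.current().run_sync(lambda: AsyncHTTPClient().fetch(request))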
Example #35
 def do_unregister():
     resolver = UnixResolver(socket_file=args.control_unix_socket,
                             resolver=Resolver())
     AsyncHTTPClient.configure(None, resolver=resolver)
     client = AsyncHTTPClient()
     try:
         response = yield client.fetch('http://unixsocket/ports/{}'.format(
             args.port),
                                       method='DELETE')
     except HTTPError as he:
         print("Could not unregister port: {}".format(he), file=sys.stderr)
         sys.exit(1)
     except Exception as e:
         logging.exception("Could not unregister port")
         sys.exit(1)
     print(response.body)
Example #36
    def __init__(self, io_loop=None, hostname_mapping=None,
            max_buffer_size=104857600, max_header_size=None,
            max_body_size=None):
        super(HTTPAdapter, self).__init__()

        self.max_buffer_size = max_buffer_size
        self.max_header_size = max_header_size
        self.max_body_size = max_body_size
        self.io_loop = io_loop or IOLoop.current()

        self.resolver = Resolver()
        if hostname_mapping is not None:
            self.resolver = OverrideResolver(resolver=self.resolver,
                mapping=hostname_mapping)

        self.tcp_client = TCPClient(resolver=self.resolver)
Example #37
    def __init__(self, io_loop, request):
        self.connect_future = TracebackFuture()
        self.read_future = None
        self.read_queue = collections.deque()
        self.key = base64.b64encode(os.urandom(16))

        scheme, sep, rest = request.url.partition(':')
        scheme = {'ws': 'http', 'wss': 'https'}[scheme]
        request.url = scheme + sep + rest
        request.headers.update({
            'Upgrade': 'websocket',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key': self.key,
            'Sec-WebSocket-Version': '13',
        })

        self.resolver = Resolver(io_loop=io_loop)
        super(WebSocketClientConnection, self).__init__(
            io_loop, None, request, lambda: None, self._on_http_response,
            104857600, self.resolver)
Example #38
class URLString(str):
    ''' A str subclass with extra URL helpers '''
    __slots__ = ('parsed', 'solver')

    def __new__(cls, s):
        ''' New hook '''
        return str.__new__(cls, s)

    def __init__(self, s):
        super().__init__()
        self.parsed = urlparse(self)
        self.solver = Resolver()

    @classmethod
    def config_solver(cls, solver_type='tornado.netutil.BlockingResolver'):
        Resolver.configure(solver_type)

    @property
    def resolve(self) -> list:
        ''' DNS resolve '''
        return self.solver.resolve(self.parsed.netloc, port=80).result()

    def HEAD(self, **kwargs) -> bytes:
        ''' http HEAD method '''
        assert self.parsed.scheme
        return requests.head(self, **kwargs).content

    def GET(self, **kwargs) -> bytes:
        ''' HTTP GET method '''
        assert self.parsed.scheme
        return requests.get(self, **kwargs).content

    def POST(self, data=dict(), **kwargs) -> bytes:
        ''' HTTP POST method '''
        assert self.parsed.scheme
        return requests.post(self, data=data, **kwargs).content

    def __getattr__(self, attr):
        ''' Delegate unknown attributes to the parsed URL '''
        return self.parsed.__getattribute__(attr)
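
A hedged usage sketch for URLString above. It assumes a Tornado version that still provides BlockingResolver (the default in config_solver), plus the requests package for the HTTP helpers, which this sketch does not call.

URLString.config_solver()                  # tornado.netutil.BlockingResolver
url = URLString('http://example.com/index.html')
print(url.scheme, url.netloc, url.path)    # attributes proxied from urlparse
print(url.resolve)                         # [(family, sockaddr), ...]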
Example #39
    def __init__(self, io_loop, request):
        self.connect_future = TracebackFuture()
        self.read_future = None
        self.read_queue = collections.deque()
        self.key = base64.b64encode(os.urandom(16))

        scheme, sep, rest = request.url.partition(":")
        scheme = {"ws": "http", "wss": "https"}[scheme]
        request.url = scheme + sep + rest
        request.headers.update(
            {
                "Upgrade": "websocket",
                "Connection": "Upgrade",
                "Sec-WebSocket-Key": self.key,
                "Sec-WebSocket-Version": "13",
            }
        )

        self.resolver = Resolver(io_loop=io_loop)
        super(WebSocketClientConnection, self).__init__(
            io_loop, None, request, lambda: None, self._on_http_response, 104857600, self.resolver
        )
Example #40
 def __init__(self, io_loop):
     self.io_loop = io_loop
     # Default blocking resolver calling socket.getaddrinfo
     self.resolver = Resolver(io_loop=io_loop)
     self._own_resolver = True
Example #41
        # callback function
        self.finish(result)

    @async_execute
    def dosomething(self,a,b,callback=None):
        # time-consuming (blocking) operation
        result='return'
        return result
"""


import functools
from tornado.netutil import Resolver, ThreadedResolver
from tornado.ioloop import IOLoop

Resolver.configure('tornado.netutil.ThreadedResolver', num_threads=10)

def async_execute(fn):
    """
    Newer Tornado already provides an equivalent of this, but it requires binding self.ioloop and self.executor on the class, which is inconvenient; this version removes that requirement.
    """
    thread_resolver = ThreadedResolver()

    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        callback = kwargs.pop("callback", None)
        future = thread_resolver.executor.submit(fn, self, *args, **kwargs)
        if callback:
            IOLoop.current().add_future(future,
                                    lambda future: callback(future.result()))
        return future
Example #42
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
    """WebSocket client connection."""

    def __init__(self, io_loop, request):
        self.connect_future = TracebackFuture()
        self.read_future = None
        self.read_queue = collections.deque()
        self.key = base64.b64encode(os.urandom(16))

        scheme, sep, rest = request.url.partition(":")
        scheme = {"ws": "http", "wss": "https"}[scheme]
        request.url = scheme + sep + rest
        request.headers.update(
            {
                "Upgrade": "websocket",
                "Connection": "Upgrade",
                "Sec-WebSocket-Key": self.key,
                "Sec-WebSocket-Version": "13",
            }
        )

        self.resolver = Resolver(io_loop=io_loop)
        super(WebSocketClientConnection, self).__init__(
            io_loop, None, request, lambda: None, self._on_http_response, 104857600, self.resolver
        )

    def _on_close(self):
        self.on_message(None)
        self.resolver.close()

    def _on_http_response(self, response):
        if not self.connect_future.done():
            if response.error:
                self.connect_future.set_exception(response.error)
            else:
                self.connect_future.set_exception(WebSocketError("Non-websocket response"))

    def _handle_1xx(self, code):
        assert code == 101
        assert self.headers["Upgrade"].lower() == "websocket"
        assert self.headers["Connection"].lower() == "upgrade"
        accept = WebSocketProtocol13.compute_accept_value(self.key)
        assert self.headers["Sec-Websocket-Accept"] == accept

        self.protocol = WebSocketProtocol13(self, mask_outgoing=True)
        self.protocol._receive_frame()

        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

        self.connect_future.set_result(self)

    def write_message(self, message, binary=False):
        """Sends a message to the WebSocket server."""
        self.protocol.write_message(message, binary)

    def read_message(self, callback=None):
        """Reads a message from the WebSocket server.

        Returns a future whose result is the message, or None
        if the connection is closed.  If a callback argument
        is given it will be called with the future when it is
        ready.
        """
        assert self.read_future is None
        future = TracebackFuture()
        if self.read_queue:
            future.set_result(self.read_queue.popleft())
        else:
            self.read_future = future
        if callback is not None:
            self.io_loop.add_future(future, callback)
        return future

    def on_message(self, message):
        if self.read_future is not None:
            self.read_future.set_result(message)
            self.read_future = None
        else:
            self.read_queue.append(message)

    def on_pong(self, data):
        pass
Example #43
class AsyncSocket(object):
    def __init__(self, sock):
        self._iostream = IOStream(sock)
        self._resolver = Resolver()
        self._readtimeout = 0
        self._connecttimeout = 0
   
    def set_readtimeout(self, timeout):
        self._readtimeout = timeout

    def set_connecttimeout(self, timeout):
        self._connecttimeout = timeout

    @synclize
    def connect(self, address):
        host, port = address
        timer = None
        try:
            if self._connecttimeout:
                timer = Timeout(self._connecttimeout)
                timer.start()
            resolved_addrs = yield self._resolver.resolve(host, port, family=socket.AF_INET)
            for addr in resolved_addrs:
                family, host_port = addr
                yield self._iostream.connect(host_port)
                break
        except TimeoutException:
            self.close()
            raise
        finally:
            if timer:
                timer.cancel()
    #@synclize
    def sendall(self, buff):
        self._iostream.write(buff)

    @synclize
    def read(self, nbytes, partial=False):
        timer = None
        try:
            if self._readtimeout:
                timer = Timeout(self._readtimeout)
                timer.start()
            buff = yield self._iostream.read_bytes(nbytes, partial=partial)
            raise Return(buff)
        except TimeoutException:
            self.close()
            raise
        finally:
            if timer:
                timer.cancel()

    def recv(self, nbytes):
        return self.read(nbytes, partial=True)

    @synclize
    def readline(self, max_bytes=-1):
        timer = None
        if self._readtimeout:
            timer = Timeout(self._readtimeout)
            timer.start()
        try:
            if max_bytes > 0:
                buff = yield self._iostream.read_until('\n', max_bytes=max_bytes)
            else:
                buff = yield self._iostream.read_until('\n')
            raise Return(buff)
        except TimeoutException:
            self.close()
            raise
        finally:
            if timer:
                timer.cancel()

    def close(self):
        self._iostream.close()

    def set_nodelay(self, flag):
        self._iostream.set_nodelay(flag)

    def settimeout(self, timeout):
        pass

    def shutdown(self, direction):
        if self._iostream.fileno():
            self._iostream.fileno().shutdown(direction)

    def recv_into(self, buff):
        expected_rbytes = len(buff)
        data = self.read(expected_rbytes, True)
        srcarray = bytearray(data)
        nbytes = len(srcarray)
        buff[0:nbytes] = srcarray
        return nbytes

    def makefile(self, mode, other):
        return self
Example #44
 def _config_resolver(cls, num_threads=10):
     from tornado.netutil import Resolver
     Resolver.configure(
             'tornado.netutil.ThreadedResolver',
             num_threads=num_threads)
     cls._resolver_configured = True
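
A hedged sketch of how a configure-once guard like _config_resolver is typically wired into a class: configure the threaded resolver the first time an instance needs one. ResolverUser is a hypothetical name, not the snippet's original class, and ThreadedResolver is deprecated in newer Tornado releases.

from tornado.netutil import Resolver

class ResolverUser(object):
    _resolver_configured = False

    @classmethod
    def _config_resolver(cls, num_threads=10):
        Resolver.configure('tornado.netutil.ThreadedResolver',
                           num_threads=num_threads)
        cls._resolver_configured = True

    def __init__(self):
        if not ResolverUser._resolver_configured:
            self._config_resolver()
        self.resolver = Resolver()   # now backed by the threaded implementation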
Example #45
class AsyncSocket(object):
    def __init__(self, sock):
        self._iostream = IOStream(sock)
        self._resolver = Resolver()
        self._readtimeout = 0
        self._connecttimeout = 0
        self._rbuffer = StringIO(b'')
        self._rbuffer_size = 0

    def set_readtimeout(self, timeout):
        self._readtimeout = timeout

    def set_connecttimeout(self, timeout):
        self._connecttimeout = timeout

    @synclize
    def connect(self, address):
        host, port = address
        timer = None
        try:
            if self._connecttimeout:
                timer = Timeout(self._connecttimeout)
                timer.start()
            resolved_addrs = yield self._resolver.resolve(
                host,
                port,
                family=socket.AF_INET)
            for addr in resolved_addrs:
                family, host_port = addr
                yield self._iostream.connect(host_port)
                break
        except TimeoutException as e:
            self.close()
            raise socket.timeout(e.message)
        finally:
            if timer:
                timer.cancel()

    def sendall(self, buff):
        self._iostream.write(buff)

    def read(self, nbytes):
        if nbytes <= self._rbuffer_size:
            self._rbuffer_size -= nbytes
            return self._rbuffer.read(nbytes)

        if self._rbuffer_size > 0:
            self._iostream._read_buffer.appendleft(self._rbuffer.read())
            self._iostream._read_buffer_size += self._rbuffer_size
            self._rbuffer_size = 0

        if nbytes <= self._iostream._read_buffer_size:
            data, data_len = b''.join(
                self._iostream._read_buffer), self._iostream._read_buffer_size
            self._iostream._read_buffer.clear()
            self._iostream._read_buffer_size = 0

            if data_len == nbytes:
                return data

            self._rbuffer_size = data_len - nbytes
            self._rbuffer = StringIO(data)
            return self._rbuffer.read(nbytes)

        data = self._read(nbytes)
        if len(data) == nbytes:
            return data

        self._rbuffer_size = len(data) - nbytes
        self._rbuffer = StringIO(data)
        return self._rbuffer.read(nbytes)

    @synclize
    def _read(self, nbytes):
        timer = None
        try:
            if self._readtimeout:
                timer = Timeout(self._readtimeout)
                timer.start()
            data = yield self._iostream.read_bytes(nbytes)
            raise Return(data)
        except TimeoutException as e:
            self.close()
            raise socket.timeout(e.message)
        finally:
            if timer:
                timer.cancel()

    def recv(self, nbytes):
        return self.read(nbytes)

    def close(self):
        self._iostream.close()

    def set_nodelay(self, flag):
        self._iostream.set_nodelay(flag)

    def settimeout(self, timeout):
        pass

    def shutdown(self, direction):
        if self._iostream.fileno():
            self._iostream.fileno().shutdown(direction)

    def recv_into(self, buff):
        expected_rbytes = len(buff)
        data = self.read(expected_rbytes)
        srcarray = bytearray(data)
        nbytes = len(srcarray)
        buff[0:nbytes] = srcarray
        return nbytes

    def makefile(self, mode, other):
        return self

    def fileno(self):
        return self._iostream.fileno()
Example #46
class TCPClient(object):
    """A non-blocking TCP connection factory.

    .. versionchanged:: 5.0
       The ``io_loop`` argument (deprecated since version 4.1) has been removed.
    """
    def __init__(self, resolver=None):
        if resolver is not None:
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver()
            self._own_resolver = True

    def close(self):
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
                max_buffer_size=None, source_ip=None, source_port=None,
                timeout=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).

        Using the ``source_ip`` kwarg, one can specify the source
        IP address to use when establishing the connection.
        In case the user needs to resolve and
        use a specific interface, it has to be handled outside
        of Tornado as this depends very much on the platform.

        Raises `TimeoutError` if the input future does not complete before
        ``timeout``, which may be specified in any form allowed by
        `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
        relative to `.IOLoop.time`)

        Similarly, when the user requires a certain source port, it can
        be specified using the ``source_port`` arg.

        .. versionchanged:: 4.5
           Added the ``source_ip`` and ``source_port`` arguments.
        """
        if timeout is not None:
            if isinstance(timeout, numbers.Real):
                timeout = IOLoop.current().time() + timeout
            elif isinstance(timeout, datetime.timedelta):
                timeout = IOLoop.current().time() + timedelta_to_seconds(timeout)
            else:
                raise TypeError("Unsupported timeout %r" % timeout)
        if timeout is not None:
            addrinfo = yield gen.with_timeout(
                timeout, self.resolver.resolve(host, port, af))
        else:
            addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo,
            functools.partial(self._create_stream, max_buffer_size,
                              source_ip=source_ip, source_port=source_port)
        )
        af, addr, stream = yield connector.start(connect_timeout=timeout)
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            if timeout is not None:
                stream = yield gen.with_timeout(timeout, stream.start_tls(
                    False, ssl_options=ssl_options, server_hostname=host))
            else:
                stream = yield stream.start_tls(False, ssl_options=ssl_options,
                                                server_hostname=host)
        raise gen.Return(stream)

    def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
                       source_port=None):
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        source_port_bind = source_port if isinstance(source_port, int) else 0
        source_ip_bind = source_ip
        if source_port_bind and not source_ip:
            # User required a specific port, but did not specify
            # a certain source IP, will bind to the default loopback.
            source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1'
            # Trying to use the same address family as the requested af socket:
            # - 127.0.0.1 for IPv4
            # - ::1 for IPv6
        socket_obj = socket.socket(af)
        set_close_exec(socket_obj.fileno())
        if source_port_bind or source_ip_bind:
            # If the user requires binding also to a specific IP/port.
            try:
                socket_obj.bind((source_ip_bind, source_port_bind))
            except socket.error:
                socket_obj.close()
                # Fail loudly if unable to use the IP/port.
                raise
        try:
            stream = IOStream(socket_obj,
                              max_buffer_size=max_buffer_size)
        except socket.error as e:
            fu = Future()
            fu.set_exception(e)
            return fu
        else:
            return stream, stream.connect(addr)
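
A hedged usage sketch for the timeout argument documented above (Tornado 5.x-style API); the endpoint is a placeholder and source_ip/source_port are left at their defaults.

import datetime
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.tcpclient import TCPClient

@gen.coroutine
def quick_connect(host='example.com', port=80):
    client = TCPClient()
    try:
        stream = yield client.connect(
            host, port,
            timeout=datetime.timedelta(seconds=5))   # bounds resolve + connect
        stream.close()
        raise gen.Return('connected')
    finally:
        client.close()

print(IOLoop.current().run_sync(quick_connect))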
Example #47
 def __init__(self, s):
     super().__init__()
     self.parsed = urlparse(self)
     self.solver = Resolver()
Example #48
 def config_solver(cls, solver_type='tornado.netutil.BlockingResolver'):
     Resolver.configure(solver_type)
Example #49
def set_resolver(resolver):
    Resolver.configure(resolver)
Example #50
class TCPClient(object):
    """A non-blocking TCP connection factory.

    .. versionchanged:: 4.1
       The ``io_loop`` argument is deprecated.
    """
    def __init__(self, resolver=None, io_loop=None):
        self.io_loop = io_loop or IOLoop.current()
        if resolver is not None:
            self.resolver = resolver
            self._own_resolver = False
        else:
            self.resolver = Resolver(io_loop=io_loop)
            self._own_resolver = True

    def close(self):
        if self._own_resolver:
            self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
                max_buffer_size=None, source_ip=None, source_port=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
        ``ssl_options`` is not None).

        Using the ``source_ip`` kwarg, one can specify the source
        IP address to use when establishing the connection.
        In case the user needs to resolve and
        use a specific interface, it has to be handled outside
        of Tornado as this depends very much on the platform.

        Similarly, when the user requires a certain source port, it can
        be specified using the ``source_port`` arg.
        """
        addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _Connector(
            addrinfo, self.io_loop,
            functools.partial(self._create_stream, max_buffer_size,
                              source_ip=source_ip, source_port=source_port)
        )
        af, addr, stream = yield connector.start()
        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
        if ssl_options is not None:
            stream = yield stream.start_tls(False, ssl_options=ssl_options,
                                            server_hostname=host)
        raise gen.Return(stream)

    def _create_stream(self, max_buffer_size, af, addr, source_ip=None,
                       source_port=None):
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        source_port_bind = source_port if isinstance(source_port, int) else 0
        source_ip_bind = source_ip
        if source_port_bind and not source_ip:
            # User required a specific port, but did not specify
            # a certain source IP, will bind to the default loopback.
            source_ip_bind = '::1' if af == socket.AF_INET6 else '127.0.0.1'
            # Trying to use the same address family as the requested af socket:
            # - 127.0.0.1 for IPv4
            # - ::1 for IPv6
        socket_obj = socket.socket(af)
        if source_port_bind or source_ip_bind:
            # If the user requires binding also to a specific IP/port.
            socket_obj.bind((source_ip_bind, source_port_bind))
            # Fail loudly if unable to use the IP/port.
        try:
            stream = IOStream(socket_obj,
                              io_loop=self.io_loop,
                              max_buffer_size=max_buffer_size)
        except socket.error as e:
            fu = Future()
            fu.set_exception(e)
            return fu
        else:
            return stream.connect(addr)
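A minimal usage sketch for the connect() call documented above, assuming a Tornado release (4.5 or later) in which TCPClient.connect accepts the source_ip/source_port keywords; the host, port, and source address are placeholders:

import socket
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.tcpclient import TCPClient

@gen.coroutine
def fetch_banner():
    client = TCPClient()
    # Bind the outgoing socket to a local address before connecting.
    stream = yield client.connect('example.com', 80,
                                  af=socket.AF_INET,
                                  source_ip='0.0.0.0')
    stream.write(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
    status_line = yield stream.read_until(b'\r\n')
    stream.close()
    raise gen.Return(status_line)

if __name__ == '__main__':
    print(IOLoop.current().run_sync(fetch_banner))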
Esempio n. 51
0
from tornado import netutil, ioloop, iostream, httpclient, stack_context
from tornado.netutil import Resolver
from functools import partial
import socket
import ctypes
import os, sys
import traceback
import re
import socket_error

Resolver.configure('tornado.platform.caresresolver.CaresResolver')
resolver = Resolver()

libc = ctypes.cdll.LoadLibrary('libc.so.6')
splice_syscall = libc.splice

SPLICE_F_NONBLOCK = 0x02
SPLICE_F_MOVE = 0x01

try:
    chunk_size = os.pathconf('.', os.pathconf_names['PC_PIPE_BUF'])
except (OSError, KeyError, ValueError):
    print('pathconf failed')
    import resource
    chunk_size = resource.getpagesize()

header = 'GET /'
opt_header = 'OPTIO'

def make_response(status, body, content_type='text/plain', extra_headers=None, length=True):
    res = 'HTTP/1.1 %s\r\n' % status
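For context on the splice(2) setup above: splice moves bytes between a pipe and another file descriptor entirely in kernel space, which is what lets a proxy like this forward data without copying it through Python. A Linux-only sketch of driving the syscall through ctypes, independent of the snippet above (the file path is illustrative):

import ctypes
import os

libc = ctypes.CDLL('libc.so.6', use_errno=True)
SPLICE_F_MOVE = 0x01

def splice(fd_in, fd_out, nbytes, flags=SPLICE_F_MOVE):
    # splice(fd_in, off_in, fd_out, off_out, len, flags); NULL offsets mean
    # "use the file descriptors' current positions".
    n = libc.splice(fd_in, None, fd_out, None, nbytes, flags)
    if n < 0:
        err = ctypes.get_errno()
        raise OSError(err, os.strerror(err))
    return n

# One side of the transfer must be a pipe; here: regular file -> pipe.
read_end, write_end = os.pipe()
with open('/etc/hostname', 'rb') as source:
    moved = splice(source.fileno(), write_end, 4096)
os.close(write_end)
print(os.read(read_end, moved))
os.close(read_end)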
Esempio n. 52
0
 def test_connect_ipv6_dual(self):
     self.skipIfLocalhostV4()
     if Resolver.configured_class().__name__.endswith('TwistedResolver'):
         self.skipTest('TwistedResolver does not support multiple addresses')
     self.do_test_connect(socket.AF_INET6, 'localhost')
Esempio n. 53
0
    @async_execute
    def dosomething(self,a,b,callback=None):
        # time-consuming (blocking) operation
        result='return'
        return result
"""

import functools
from tornado.netutil import Resolver, ThreadedResolver
from tornado.ioloop import IOLoop
from ..settings_manager import settings
from multiprocessing import cpu_count


# Configure which implementation class backs the Resolver interface,
# and pass that class's constructor arguments at the same time.
Resolver.configure('tornado.netutil.ThreadedResolver',
                   num_threads=settings.THREADS_NUM if 'THREADS_NUM' in settings else cpu_count())


def async_execute(fn):
    """
    Newer versions of Tornado already ship an implementation of this, but it
    requires binding self.ioloop and self.executor on the class, which is
    inconvenient; this version removes that requirement.

    In fact, the run_on_executor decorator in concurrent.py implements
    similar functionality.

    """
    thread_resolver = ThreadedResolver()

    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        callback = kwargs.pop("callback", None)
        future = thread_resolver.executor.submit(fn, self, *args, **kwargs)
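As the docstring notes, tornado.concurrent.run_on_executor covers the same ground; a minimal sketch of that built-in route, assuming a Tornado release where the decorator only needs an `executor` attribute on the instance (the Worker class and its arguments are illustrative):

from concurrent.futures import ThreadPoolExecutor
from tornado import gen
from tornado.concurrent import run_on_executor
from tornado.ioloop import IOLoop

class Worker(object):
    # run_on_executor looks up `self.executor` to schedule the call.
    executor = ThreadPoolExecutor(max_workers=4)

    @run_on_executor
    def dosomething(self, a, b):
        # Blocking work; it runs on the thread pool, not the IOLoop thread.
        return a + b

@gen.coroutine
def main():
    result = yield Worker().dosomething(1, 2)
    print(result)

IOLoop.current().run_sync(main)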
Esempio n. 54
0
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
    """WebSocket client connection.

    This class should not be instantiated directly; use the
    `websocket_connect` function instead.
    """
    def __init__(self, io_loop, request):
        self.connect_future = TracebackFuture()
        self.read_future = None
        self.read_queue = collections.deque()
        self.key = base64.b64encode(os.urandom(16))

        scheme, sep, rest = request.url.partition(':')
        scheme = {'ws': 'http', 'wss': 'https'}[scheme]
        request.url = scheme + sep + rest
        request.headers.update({
            'Upgrade': 'websocket',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key': self.key,
            'Sec-WebSocket-Version': '13',
        })

        self.resolver = Resolver(io_loop=io_loop)
        super(WebSocketClientConnection, self).__init__(
            io_loop, None, request, lambda: None, self._on_http_response,
            104857600, self.resolver)

    def close(self):
        """Closes the websocket connection.

        .. versionadded:: 3.2
        """
        if self.protocol is not None:
            self.protocol.close()
            self.protocol = None

    def _on_close(self):
        self.on_message(None)
        self.resolver.close()
        super(WebSocketClientConnection, self)._on_close()

    def _on_http_response(self, response):
        if not self.connect_future.done():
            if response.error:
                self.connect_future.set_exception(response.error)
            else:
                self.connect_future.set_exception(WebSocketError(
                    "Non-websocket response"))

    def _handle_1xx(self, code):
        assert code == 101
        assert self.headers['Upgrade'].lower() == 'websocket'
        assert self.headers['Connection'].lower() == 'upgrade'
        accept = WebSocketProtocol13.compute_accept_value(self.key)
        assert self.headers['Sec-Websocket-Accept'] == accept

        self.protocol = WebSocketProtocol13(self, mask_outgoing=True)
        self.protocol._receive_frame()

        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

        self.connect_future.set_result(self)

    def write_message(self, message, binary=False):
        """Sends a message to the WebSocket server."""
        self.protocol.write_message(message, binary)

    def read_message(self, callback=None):
        """Reads a message from the WebSocket server.

        Returns a future whose result is the message, or None
        if the connection is closed.  If a callback argument
        is given it will be called with the future when it is
        ready.
        """
        assert self.read_future is None
        future = TracebackFuture()
        if self.read_queue:
            future.set_result(self.read_queue.popleft())
        else:
            self.read_future = future
        if callback is not None:
            self.io_loop.add_future(future, callback)
        return future

    def on_message(self, message):
        if self.read_future is not None:
            self.read_future.set_result(message)
            self.read_future = None
        else:
            self.read_queue.append(message)

    def on_pong(self, data):
        pass
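Per the class docstring, callers are expected to go through websocket_connect rather than instantiate this class directly; a minimal sketch of that path (the URL and the commented-out run line are placeholders):

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.websocket import websocket_connect

@gen.coroutine
def echo_once(url):
    conn = yield websocket_connect(url)   # resolves to a WebSocketClientConnection
    conn.write_message(u'hello')
    reply = yield conn.read_message()     # None once the server closes
    conn.close()
    raise gen.Return(reply)

# IOLoop.current().run_sync(lambda: echo_once('ws://localhost:8888/ws'))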
Esempio n. 55
0
#!/usr/bin/env python
# encoding: utf-8
from copy import copy
from tornado.web import Cookie
from tornado.gen import coroutine, Return
from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError, HTTPResponse
from tornado.httputil import HTTPHeaders
from tornado.ioloop import IOLoop
from . import PY2

try:
    import pycares
    from tornado.netutil import Resolver
    from tornado.platform.caresresolver import CaresResolver

    Resolver.configure(CaresResolver)
except ImportError:
    pass


if PY2:
    b = unicode
    iteritems = lambda x: x.iteritems()
else:
    b = str
    iteritems = lambda x: x.items()


try:
    import ujson as json
except ImportError:
Esempio n. 56
0
class LBConnector(object):
    """ Adds support for sequential search for live LB to IOStream.
    Uses socket.create_connection to perform it - sequential approach.
    """

    def __init__(self, io_loop):
        self.io_loop = io_loop
        # Default blocking resolver calling socket.getaddrinfo
        self.resolver = Resolver(io_loop=io_loop)
        self._own_resolver = True

    def close(self):
        self.resolver.close()

    @gen.coroutine
    def connect(self, host, port, timeout, af=socket.AF_UNSPEC,
                max_buffer_size=None):
        """Connect to the given host and port.

        Asynchronously returns an `.IOStream`.
        """
        addrinfo = yield self.resolver.resolve(host, port, af)
        connector = _RandomConnector(
            addrinfo, self.io_loop,
            partial(self._create_stream, max_buffer_size, timeout))

        # Use large timeout for connection search, assume that all addresses
        # apart from the last will timeout
        total_connect_timeout = (len(addrinfo) + 1) * timeout
        af, addr, stream = yield connector.start(total_connect_timeout)

        # TODO: For better performance we could cache the (af, addr)
        # information here and re-use it on subsequent connections to
        # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)

        # TODO: support ssl; it can be copied from tornado but we need to
        # read ssl opts from Connection
        raise gen.Return(stream)

    def _create_stream(self, max_buffer_size, timeout, af, addr):
        sock = socket.socket(af)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        future = Future()
        stream = iostream.IOStream(sock,
                                   io_loop=self.io_loop,
                                   max_buffer_size=max_buffer_size)

        def on_stream_connect_timeout():
            """ Close the stream and pass an exception to caller """
            stream.set_close_callback(None)
            exc = iostream.StreamClosedError("Connect timeout")
            stream.close(exc_info=(None, exc, None))
            future.set_exception(exc)

        def on_stream_connected():
            """ On success clean after ourselves """
            self.io_loop.remove_timeout(handler)
            stream.set_close_callback(None)
            future.set_result(stream)

        def on_stream_error():
            """ Stream close while connecting means it failed
            Cancel the timeout and pass the error to caller """
            self.io_loop.remove_timeout(handler)
            future.set_exception(stream.error)

        timeout = timedelta(seconds=timeout)
        handler = self.io_loop.add_timeout(timeout, on_stream_connect_timeout)
        stream.set_close_callback(on_stream_error)
        stream.connect(addr, callback=on_stream_connected)
        return future
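The add_timeout/close-callback bookkeeping in _create_stream above can also be expressed with tornado.gen.with_timeout; a minimal sketch of that alternative, not taken from the snippet (the function name is illustrative):

import datetime
import socket
from tornado import gen, iostream

@gen.coroutine
def connect_with_timeout(addr, seconds, af=socket.AF_INET):
    stream = iostream.IOStream(socket.socket(af))
    try:
        # with_timeout raises gen.TimeoutError if connect() is still pending
        # when the deadline expires; close the stream and re-raise.
        yield gen.with_timeout(datetime.timedelta(seconds=seconds),
                               stream.connect(addr))
    except gen.TimeoutError:
        stream.close()
        raise
    raise gen.Return(stream)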
Esempio n. 57
0
 def __init__(self, sock):
     self._iostream = IOStream(sock)
     self._resolver = Resolver()
     self._readtimeout = 0
     self._connecttimeout = 0