Example #1
 def test_100_continue(self):
     # Run through a 100-continue interaction by hand:
     # When given Expect: 100-continue, we get a 100 response after the
     # headers, and then the real response after the body.
     stream = IOStream(socket.socket(), io_loop=self.io_loop)
     stream.connect(("localhost", self.get_http_port()), callback=self.stop)
     self.wait()
     stream.write(b("\r\n").join([b("POST /hello HTTP/1.1"),
                                  b("Content-Length: 1024"),
                                  b("Expect: 100-continue"),
                                  b("\r\n")]), callback=self.stop)
     self.wait()
     stream.read_until(b("\r\n\r\n"), self.stop)
     data = self.wait()
     self.assertTrue(data.startswith(b("HTTP/1.1 100 ")), data)
     stream.write(b("a") * 1024)
     stream.read_until(b("\r\n"), self.stop)
     first_line = self.wait()
     self.assertTrue(first_line.startswith(b("HTTP/1.1 200")), first_line)
     stream.read_until(b("\r\n\r\n"), self.stop)
     header_data = self.wait()
     headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
     stream.read_bytes(int(headers["Content-Length"]), self.stop)
     body = self.wait()
     self.assertEqual(body, b("Got 1024 bytes in POST"))
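The test above only exercises the client side of the exchange; it assumes the server maps /hello to a handler that reports the size of the POST body. A minimal sketch of such a fixture (the class name and wiring here are assumptions, not the actual test code) could be:

# Hypothetical server-side fixture for the 100-continue test: any handler
# that answers POST /hello with "Got <n> bytes in POST" satisfies it.
from tornado.web import Application, RequestHandler

class EchoPostHandler(RequestHandler):
    def post(self):
        # The client sends a 1024-byte body, so this produces
        # "Got 1024 bytes in POST".
        self.write("Got %d bytes in POST" % len(self.request.body))

application = Application([("/hello", EchoPostHandler)])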
Example #2
 def test_100_continue(self):
     # Run through a 100-continue interaction by hand:
     # When given Expect: 100-continue, we get a 100 response after the
     # headers, and then the real response after the body.
     stream = IOStream(socket.socket(), io_loop=self.io_loop)
     stream.connect(("localhost", self.get_http_port()), callback=self.stop)
     self.wait()
     stream.write(b("\r\n").join([
         b("POST /hello HTTP/1.1"),
         b("Content-Length: 1024"),
         b("Expect: 100-continue"),
         b("\r\n")
     ]),
                  callback=self.stop)
     self.wait()
     stream.read_until(b("\r\n\r\n"), self.stop)
     data = self.wait()
     self.assertTrue(data.startswith(b("HTTP/1.1 100 ")), data)
     stream.write(b("a") * 1024)
     stream.read_until(b("\r\n"), self.stop)
     first_line = self.wait()
     self.assertTrue(first_line.startswith(b("HTTP/1.1 200")), first_line)
     stream.read_until(b("\r\n\r\n"), self.stop)
     header_data = self.wait()
     headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
     stream.read_bytes(int(headers["Content-Length"]), self.stop)
     body = self.wait()
     self.assertEqual(body, b("Got 1024 bytes in POST"))
Example #3
 def fetch(self, request, callback, **kwargs):
     if not isinstance(request, HTTPRequest):
         request = HTTPRequest(url=request, **kwargs)
     if not isinstance(request.headers, HTTPHeaders):
         request.headers = HTTPHeaders(request.headers)
     callback = stack_context.wrap(callback)
     self.queue.append((request, callback))
     self._process_queue()
     if self.queue:
         logging.debug("max_clients limit reached, request queued. "
                       "%d active, %d queued requests." %
                       (len(self.active), len(self.queue)))
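_process_queue is not shown in this example; a simplified sketch of how such a method could drain the queue under a max_clients limit (an illustration under assumed attribute and helper names, not Tornado's actual SimpleAsyncHTTPClient code) might look like:

# Simplified sketch of a queue-draining method, assuming self.queue is a
# collections.deque and self.active tracks in-flight requests.
def _process_queue(self):
    while self.queue and len(self.active) < self.max_clients:
        request, callback = self.queue.popleft()
        key = object()
        self.active[key] = (request, callback)
        # A hypothetical helper would open the connection and remove the
        # entry from self.active once the fetch completes.
        self._handle_request(key, request, callback)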
Example #4
    def _on_headers(self, data):
        data = native_str(data.decode("latin1"))
        first_line, _, header_data = data.partition("\n")
        match = re.match("HTTP/1.[01] ([0-9]+)", first_line)
        assert match
        self.code = int(match.group(1))
        self.headers = HTTPHeaders.parse(header_data)

        if "Content-Length" in self.headers:
            if "," in self.headers["Content-Length"]:
                # Proxies sometimes cause Content-Length headers to get
                # duplicated.  If all the values are identical then we can
                # use them but if they differ it's an error.
                pieces = re.split(r',\s*', self.headers["Content-Length"])
                if any(i != pieces[0] for i in pieces):
                    raise ValueError("Multiple unequal Content-Lengths: %r" % 
                                     self.headers["Content-Length"])
                self.headers["Content-Length"] = pieces[0]
            content_length = int(self.headers["Content-Length"])
        else:
            content_length = None

        if self.request.header_callback is not None:
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))

        if self.request.method == "HEAD":
            # HEAD requests never have content, even though they may have
            # content-length headers
            self._on_body(b(""))
            return
        if 100 <= self.code < 200 or self.code in (204, 304):
            # These response codes never have bodies
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            assert "Transfer-Encoding" not in self.headers
            assert content_length in (None, 0)
            self._on_body(b(""))
            return

        if (self.request.use_gzip and
            self.headers.get("Content-Encoding") == "gzip"):
            # Magic parameter makes zlib module understand gzip header
            # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
            self._decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
        if self.headers.get("Transfer-Encoding") == "chunked":
            self.chunks = []
            self.stream.read_until(b("\r\n"), self._on_chunk_length)
        elif content_length is not None:
            self.stream.read_bytes(content_length, self._on_body)
        else:
            self.stream.read_until_close(self._on_body)
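The "magic parameter" mentioned in the gzip branch can be checked in isolation: passing 16 + MAX_WBITS makes zlib consume the gzip header instead of expecting a raw zlib stream. A standalone Python 3 demonstration:

# Standalone demonstration of zlib.decompressobj(16 + zlib.MAX_WBITS):
# the +16 tells zlib to expect and skip a gzip header.
import gzip
import zlib

payload = b"hello world" * 100
compressed = gzip.compress(payload)
decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
restored = decompressor.decompress(compressed) + decompressor.flush()
assert restored == payload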
Example #5
    def _on_headers(self, data):
        data = native_str(data.decode("latin1"))
        first_line, _, header_data = data.partition("\n")
        match = re.match("HTTP/1.[01] ([0-9]+)", first_line)
        assert match
        self.code = int(match.group(1))
        self.headers = HTTPHeaders.parse(header_data)

        if "Content-Length" in self.headers:
            if "," in self.headers["Content-Length"]:
                # Proxies sometimes cause Content-Length headers to get
                # duplicated.  If all the values are identical then we can
                # use them but if they differ it's an error.
                pieces = re.split(r',\s*', self.headers["Content-Length"])
                if any(i != pieces[0] for i in pieces):
                    raise ValueError("Multiple unequal Content-Lengths: %r" %
                                     self.headers["Content-Length"])
                self.headers["Content-Length"] = pieces[0]
            content_length = int(self.headers["Content-Length"])
        else:
            content_length = None

        if self.request.header_callback is not None:
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))

        if self.request.method == "HEAD":
            # HEAD requests never have content, even though they may have
            # content-length headers
            self._on_body(b(""))
            return
        if 100 <= self.code < 200 or self.code in (204, 304):
            # These response codes never have bodies
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            assert "Transfer-Encoding" not in self.headers
            assert content_length in (None, 0)
            self._on_body(b(""))
            return

        if (self.request.use_gzip
                and self.headers.get("Content-Encoding") == "gzip"):
            # Magic parameter makes zlib module understand gzip header
            # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
            self._decompressor = zlib.decompressobj(16 + zlib.MAX_WBITS)
        if self.headers.get("Transfer-Encoding") == "chunked":
            self.chunks = []
            self.stream.read_until(b("\r\n"), self._on_chunk_length)
        elif content_length is not None:
            self.stream.read_bytes(content_length, self._on_body)
        else:
            self.stream.read_until_close(self._on_body)
Example #6
 def test_unix_socket(self):
     sockfile = os.path.join(self.tmpdir, "test.sock")
     sock = netutil.bind_unix_socket(sockfile)
     app = Application([("/hello", HelloWorldRequestHandler)])
     server = HTTPServer(app, io_loop=self.io_loop)
     server.add_socket(sock)
     stream = IOStream(socket.socket(socket.AF_UNIX), io_loop=self.io_loop)
     stream.connect(sockfile, self.stop)
     self.wait()
     stream.write(b("GET /hello HTTP/1.0\r\n\r\n"))
     stream.read_until(b("\r\n"), self.stop)
     response = self.wait()
     self.assertEqual(response, b("HTTP/1.0 200 OK\r\n"))
     stream.read_until(b("\r\n\r\n"), self.stop)
     headers = HTTPHeaders.parse(self.wait().decode('latin1'))
     stream.read_bytes(int(headers["Content-Length"]), self.stop)
     body = self.wait()
     self.assertEqual(body, b("Hello world"))
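HelloWorldRequestHandler is defined elsewhere in the test module; any handler that answers GET /hello with the body "Hello world" satisfies the assertions above. An illustrative stand-in (not the real fixture):

# Illustrative stand-in for HelloWorldRequestHandler; the real fixture
# lives elsewhere in the test suite.
from tornado.web import RequestHandler

class HelloWorldRequestHandler(RequestHandler):
    def get(self):
        self.finish("Hello world")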
Example #7
    def test_multi_line(self):
        # Lines beginning with whitespace are appended to the previous line
        # with any leading whitespace replaced by a single space.
        # Note that while multi-line headers are a part of the HTTP spec,
        # their use is strongly discouraged.
        data = """\
Foo: bar
 baz
Asdf: qwer
\tzxcv
Foo: even
     more
     lines
""".replace("\n", "\r\n")
        headers = HTTPHeaders.parse(data)
        self.assertEqual(headers["asdf"], "qwer zxcv")
        self.assertEqual(headers.get_list("asdf"), ["qwer zxcv"])
        self.assertEqual(headers["Foo"], "bar baz,even more lines")
        self.assertEqual(headers.get_list("foo"),
                         ["bar baz", "even more lines"])
        self.assertEqual(sorted(list(headers.get_all())),
                         [("Asdf", "qwer zxcv"), ("Foo", "bar baz"),
                          ("Foo", "even more lines")])
Example #8
    def test_multi_line(self):
        # Lines beginning with whitespace are appended to the previous line
        # with any leading whitespace replaced by a single space.
        # Note that while multi-line headers are a part of the HTTP spec,
        # their use is strongly discouraged.
        data = """\
Foo: bar
 baz
Asdf: qwer
\tzxcv
Foo: even
     more
     lines
""".replace("\n", "\r\n")
        headers = HTTPHeaders.parse(data)
        self.assertEqual(headers["asdf"], "qwer zxcv")
        self.assertEqual(headers.get_list("asdf"), ["qwer zxcv"])
        self.assertEqual(headers["Foo"], "bar baz,even more lines")
        self.assertEqual(headers.get_list("foo"), ["bar baz", "even more lines"])
        self.assertEqual(sorted(list(headers.get_all())),
                         [("Asdf", "qwer zxcv"),
                          ("Foo", "bar baz"),
                          ("Foo", "even more lines")])