Example #1
    def adapt_prepare(self):
        """Prepares the special trip parameters."""

        parsed = urlsplit(self.url)
        if parsed.scheme not in ('http', 'https'):
            raise ValueError('Unsupported url scheme: %s' % self.url)
        netloc = parsed.netloc
        if '@' in netloc:
            userpass, _, netloc = netloc.rpartition('@')
        self.host, self.port = split_host_and_port(netloc)
        if self.port is None:
            self.port = 443 if parsed.scheme == 'https' else 80

        self.af = AF_INET

        self.decompress = 'gzip' in \
            self.headers.get('Accept-Encoding', '')

        req_path = ((parsed.path or '/') +
            (('?' + parsed.query) if parsed.query else ''))
        self.start_line = RequestStartLine(self.method, req_path, '')

        self.headers = HTTPHeaders(self.headers)

        if 'Connection' not in self.headers:
            self.headers['Connection'] = 'close'
        if 'Host' not in self.headers:
            self.headers['Host'] = self.host
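
Example #1 assembles the start line from a parsed URL and leaves the version empty for the connection layer to fill in. Below is a minimal sketch of the same construction, assuming tornado.httputil.RequestStartLine (a (method, path, version) namedtuple) and the standard-library urlsplit; the URL is made up for illustration.

from urllib.parse import urlsplit

from tornado.httputil import RequestStartLine

# re-assemble the path and query the same way adapt_prepare does
parsed = urlsplit('https://example.com/search?q=trip')
req_path = (parsed.path or '/') + (('?' + parsed.query) if parsed.query else '')
start_line = RequestStartLine('GET', req_path, '')
# -> RequestStartLine(method='GET', path='/search?q=trip', version='')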
Example #2
    def _start_request(self, pseudo_headers, headers):
        if "connection" in headers:
            raise ConnectionError(constants.ErrorCode.PROTOCOL_ERROR,
                                  "connection header should not be present")
        if "te" in headers and headers["te"] != "trailers":
            raise StreamError(self.stream_id,
                              constants.ErrorCode.PROTOCOL_ERROR)
        if self.conn.is_client:
            status = int(pseudo_headers[':status'])
            start_line = ResponseStartLine('HTTP/2.0', status,
                                           responses.get(status, ''))
        else:
            for k in (':method', ':scheme', ':path'):
                if k not in pseudo_headers:
                    raise StreamError(self.stream_id,
                                      constants.ErrorCode.PROTOCOL_ERROR)
            start_line = RequestStartLine(pseudo_headers[':method'],
                                          pseudo_headers[':path'], 'HTTP/2.0')
            self._request_start_line = start_line

        if (self.conn.is_client and (self._request_start_line.method == 'HEAD'
                                     or start_line.code == 304)):
            self._incoming_content_remaining = 0
        elif "content-length" in headers:
            self._incoming_content_remaining = int(headers["content-length"])

        if not self.conn.is_client or status >= 200:
            self._phase = constants.HTTPPhase.BODY

        self._delegate_started = True
        self.delegate.headers_received(start_line, headers)
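
Example #2 maps HTTP/2 pseudo-headers onto Tornado's start-line tuples. A short sketch of that mapping, assuming tornado.httputil's RequestStartLine and ResponseStartLine namedtuples; the pseudo-header values are illustrative only.

from tornado.httputil import RequestStartLine, ResponseStartLine

# server side: ':method' and ':path' pseudo-headers become the request start line
pseudo_headers = {':method': 'GET', ':scheme': 'https', ':path': '/index'}
request_line = RequestStartLine(pseudo_headers[':method'],
                                pseudo_headers[':path'], 'HTTP/2.0')

# client side: the ':status' pseudo-header becomes the response start line
response_line = ResponseStartLine('HTTP/2.0', int('204'), 'No Content')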
Example #3
def test_no_connection_reuse_with_connection_close(server, io_stream):
    '''Sends a '/ping' request to the server with 'Connection: Close' and validates
    that the connection is closed by the server.

    To check that the server closes the connection, read all the data from the
    socket. If the server fails to close the socket, the test will hang for the
    specified async_test's timeout.

    Args:
        server: an instance of `Server`.
        io_stream: an instance of `tornado.iostream.IOStream`.
    '''

    yield io_stream.connect(('localhost', server.opts['port']))

    start_line = RequestStartLine(method='GET',
                                  path='/ping',
                                  version='HTTP/1.1')
    headers = HTTPHeaders({'Connection': 'Close'})

    connection = HTTP1Connection(io_stream, is_client=True)
    yield connection.write_headers(start_line, headers)
    connection.finish()

    response = PingResponseHandler()
    yield connection.read_response(response)

    assert response.start_line.code == requests.codes.ok

    # read all remaining data from the connection and validate it (there should be no data)
    remaining_data = yield io_stream.read_until_close()
    assert len(remaining_data) == 0
Example #4
def test_connection_reuse_for_HTTP_1_1(server, io_stream, num_requests):
    '''Sends multiple '/ping' requests to the server using the same connection.

    Requests are served by the ok handler, which simply responds with 200.
    Keep-Alive is assumed by default for each request since the protocol is HTTP/1.1.

    Args:
        server: an instance of `Server`.
        io_stream:
            an instance of `tornado.iostream.IOStream` that is reused for
            the consecutive requests.
        num_requests: the number of consecutive '/ping' requests to send.
    '''

    yield io_stream.connect(('localhost', server.opts['port']))

    # common request start line and headers for all ping requests
    start_line = RequestStartLine(method='GET',
                                  path='/ping',
                                  version='HTTP/1.1')
    headers = HTTPHeaders()

    for request in range(num_requests):
        connection = HTTP1Connection(io_stream, is_client=True)
        yield connection.write_headers(start_line, headers)
        connection.finish()

        response = PingResponseHandler()
        yield connection.read_response(response)

        assert response.start_line.code == requests.codes.ok
Example #5
 def headers_received(self, start_line, headers):
     if 'Upgrade' in headers:
         upgrades = set(i.strip() for i in headers['Upgrade'].split(','))
         if constants.HTTP2_CLEAR in upgrades:
             self.connection.upgrading = True
             start_line = RequestStartLine(
                 start_line.method, start_line.path, 'HTTP/2.0')
     self.connection._request_start_line = start_line
     return self.delegate.headers_received(start_line, headers)
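
Example #5 rewrites the start line's version when the client asks for a cleartext HTTP/2 upgrade. A minimal standalone sketch of the token check, assuming constants.HTTP2_CLEAR equals 'h2c'.

from tornado.httputil import RequestStartLine

start_line = RequestStartLine('GET', '/', 'HTTP/1.1')
upgrade_header = 'h2c, websocket'  # illustrative Upgrade header value
upgrades = set(token.strip() for token in upgrade_header.split(','))
if 'h2c' in upgrades:  # assumes constants.HTTP2_CLEAR == 'h2c'
    start_line = RequestStartLine(start_line.method, start_line.path, 'HTTP/2.0')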
Example #6
    def send(self,
             stream=False,
             timeout=None,
             verify=True,
             cert=None,
             proxies=None):
        request = self.request
        connect_timeout, self.read_timeout = parse_timeout(timeout)
        self.stream_body = stream

        # set connect timeout
        with stack_context.ExceptionStackContext(self._handle_exception):
            if connect_timeout:
                self._timeout = self.io_loop.call_later(
                    connect_timeout,
                    stack_context.wrap(
                        functools.partial(self._on_timeout,
                                          'while connecting')))

            # set proxy related info
            proxy = select_proxy(request.url, proxies)
            self.headers = request.headers.copy()
            if proxy:
                proxy = prepend_scheme_if_needed(proxy, 'http')
                parsed = urlparse(proxy)
                scheme, host, port = parsed.scheme, proxy, parsed.port
                port = port or (443 if scheme == 'https' else 80)
                self.start_line = RequestStartLine(request.method, request.url,
                                                   '')
                self.headers.update(get_proxy_headers(proxy))
            else:
                host, port = None, None
                self.start_line = request.start_line

            self.tcp_client.connect(request.host,
                                    request.port,
                                    af=request.af,
                                    ssl_options=self._get_ssl_options(
                                        request, verify, cert),
                                    max_buffer_size=self.max_buffer_size,
                                    source_ip=host,
                                    source_port=port,
                                    callback=self._on_connect)
Example #7
 def _check_header_length(self):
     if (sum(len(f.data) for f in self._header_frames) >
             self.conn.params.max_header_size):
         if self.conn.is_client:
             # TODO: Need tests for client side of headers-too-large.
             # What's the best way to send an error?
             self.delegate.on_connection_close()
         else:
             # write_headers needs a start line so it can tell
             # whether this is a HEAD or not. If we're rejecting
             # the headers we can't know so just make something up.
             # Note that this means the error response body MUST be
             # zero bytes so it doesn't matter whether the client
             # sent a HEAD or a GET.
             self._request_start_line = RequestStartLine(
                 'GET', '/', 'HTTP/2.0')
             start_line = ResponseStartLine('HTTP/2.0', 431,
                                            'Headers too large')
             self.write_headers(start_line, HTTPHeaders())
             self.finish()
         return
Example #8
    def switch_protocols(self, callback):
        stream = self.conn.detach()
        stream.write(utf8(
            "HTTP/1.1 101 Switching Protocols\r\n"
            "Connection: Upgrade\r\n"
            "Upgrade: %s\r\n"
            "\r\n" % constants.HTTP2_CLEAR))
        h2_conn = Connection(stream, False, params=self.http2_params,
                             context=self.context)
        self.server._connections.add(h2_conn)
        h2_conn.start(self.server)
        self.conn = Stream(h2_conn, 1, None, context=self.context)
        h2_conn.streams[1] = self.conn
        self.conn._request_start_line = RequestStartLine(
            self._request_start_line.method,
            self._request_start_line.path,
            'HTTP/2.0')
        yield h2_conn._initial_settings_written

        if self.written_headers is not None:
            self.conn.write_headers(*self.written_headers)
        for write in self.written_chunks:
            self.conn.write(*write)
        if self.write_finished:
            self.conn.finish()
        if self.max_body_size is not None:
            self.conn.set_max_body_size(self.max_body_size)
        if self.body_timeout is not None:
            self.conn.set_body_timeout(self.body_timeout)
        if self.close_callback is not None:
            self.conn.set_close_callback(self.close_callback)
            self.close_callback = None
        self.upgrading = False

        try:
            callback()
        except Exception:
            app_log.error("Exception in callback", exc_info=True)
            self.conn.reset()
Example #9
 def setUp(self):
     super(TestWebSocketBase, self).setUp()
     self.application = Application()
     self.server = HTTPServer(self.application)
     self.socket, self.port = testing.bind_unused_port()
     self.server.add_socket(self.socket)
     self.instance = WebSocketBase(
         self.application,
         HTTPServerRequest(
             method="GET",
             uri='/',
             version="HTTP/1.0",
             headers=HTTPHeaders(),
             body=BytesIO(),
             host=None,
             files=None,
             connection=HTTP1Connection(stream=IOStream(socket.socket()),
                                        is_client=False),
             start_line=RequestStartLine(method='GET',
                                         path='/',
                                         version='HTTP/1.1'),
         ))
     self.instance.open()
Example #10
    def send(self,
             request,
             stream=False,
             timeout=None,
             verify=True,
             cert=None,
             proxies=None):
        """Sends Request object. Returns Response object.

        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param verify: (optional) Whether to verify SSL certificates.
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        :rtype: trip.adapters.MessageDelegate
        """
        if isinstance(timeout, tuple):
            try:
                connect_timeout, read_timeout = timeout
            except ValueError as e:
                # this may raise a string formatting error.
                err = ("Invalid timeout {0}. Pass a (connect, read) "
                       "timeout tuple, or a single float to set "
                       "both timeouts to the same value".format(timeout))
                raise ValueError(err)
        else:
            connect_timeout, read_timeout = timeout, timeout

        timeout_reason = {}
        if connect_timeout:
            timeout_reason['reason'] = 'while connecting'
            self.io_loop.add_timeout(
                self.io_loop.time() + connect_timeout,
                stack_context.wrap(
                    functools.partial(self._on_timeout, timeout_reason)))

        proxy = select_proxy(request.url, proxies)
        if proxy:
            host, port = (proxy.split(':') + [80])[:2]
            port = int(port)
            start_line = RequestStartLine(request.method, request.url, '')
        else:
            host, port = None, None
            start_line = request.start_line

        s = yield self.tcp_client.connect(request.host,
                                          request.port,
                                          af=request.af,
                                          ssl_options=self._get_ssl_options(
                                              request, verify, cert),
                                          max_buffer_size=self.max_buffer_size,
                                          source_ip=host,
                                          source_port=port)

        if not timeout_reason or timeout_reason.get('reason'):
            s.set_nodelay(True)
            timeout_reason.clear()
        else:
            raise gen.Return(
                Timeout(timeout_reason.get('error', 'unknown'),
                        request=request))

        connection = HTTPConnection(
            s,
            HTTP1ConnectionParameters(no_keep_alive=True,
                                      max_header_size=self.max_header_size,
                                      max_body_size=self.max_body_size,
                                      decompress=request.decompress))

        if read_timeout:
            timeout_reason['reason'] = 'during request'
            self.io_loop.add_timeout(
                self.io_loop.time() + read_timeout,
                stack_context.wrap(
                    functools.partial(self._on_timeout, timeout_reason)))

        connection.write_headers(start_line, request.headers)
        if request.body is not None:
            connection.write(request.body)  #TODO: partial sending
        connection.finish()

        future = Future()

        def handle_response(response):
            if isinstance(response, Exception):
                future.set_exception(response)
            else:
                future.set_result(response)

        resp = MessageDelegate(request, connection, handle_response, stream)

        headers_received = yield connection.read_headers(resp)

        if not stream and headers_received:
            yield connection.read_body(resp)

        if not timeout_reason or timeout_reason.get('reason'):
            timeout_reason.clear()
            resp = yield future
            raise gen.Return(resp)
        else:
            raise gen.Return(
                Timeout(timeout_reason.get('error', 'unknown'),
                        request=request))
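
Example #10 accepts either a single float or a (connect, read) tuple as the timeout. A standalone sketch of that normalization under the same assumptions; the helper name is made up.

def split_timeout(timeout):
    # accept either a single float or a (connect, read) pair, mirroring the
    # branching at the top of send() above
    if isinstance(timeout, tuple):
        try:
            connect_timeout, read_timeout = timeout
        except ValueError:
            raise ValueError('Invalid timeout %r. Pass a (connect, read) '
                             'timeout tuple, or a single float.' % (timeout,))
    else:
        connect_timeout = read_timeout = timeout
    return connect_timeout, read_timeout

assert split_timeout(5.0) == (5.0, 5.0)
assert split_timeout((3.05, 27)) == (3.05, 27)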
Example #11
def test_handler_echo_chunk_exchange(server, io_stream, chunks):
    '''Sends the request in chunks and validates that each received chunk exactly matches the sent one.

    Test scenario:
    1. The request's headers are exchanged.
       The client sends the request's headers and waits for the response's headers.
       The echo handler must respond with 200 and the same 'Content-Length'.
    2. The request's body is exchanged in chunks.
       The first chunk of data is sent after the response's headers are received. The
       client then waits to receive the same chunk back from the handler. Once the first
       chunk is exchanged, the process continues with the second chunk and so on.

    Args:
        server: an instance of `Server`.
        io_stream: an instance of `tornado.iostream.IOStream`.
        chunks: request's body chunks.
    '''
    class ChunkedEchoResponseHandler(HTTPMessageDelegate):
        '''Handler for the chunked echo response.

        It is used to send the request's headers and receive the response's headers.
        After that the HTTP connection is detached and the underlying stream can be
        used to send data to and receive data from the server.

        Attributes:
            stream:
                An instance of `tornado.iostream.IOStream`. The stream must be connected
                to a remote address at this point.
        '''
        def __init__(self, stream):
            self.stream = stream
            self.connection = HTTP1Connection(stream, is_client=True)

            self.response_start_line = None
            self.response_headers = None

        def write_headers(self, start_line, headers):
            '''Sends request's start line and headers.

            Returns a `tornado.concurrent.Future` that resolves to None after
            the response's headers have been read and parsed.
            '''
            headers_future = self.connection.write_headers(start_line, headers)
            headers_future.result()
            return self.connection.read_response(self)

        def headers_received(self, start_line, headers):
            '''Called when the HTTP headers have been received and parsed.
            '''
            self.response_start_line = start_line
            self.response_headers = headers
            self.connection.detach()

        def on_connection_close(self):
            '''Called if the connection is closed without finishing the request.
            '''
            pytest.fail('connection is closed without finishing the request')

    yield io_stream.connect(('localhost', server.opts['port']))

    request_start_line = RequestStartLine(method='POST',
                                          path='/echo',
                                          version='HTTP/1.1')

    content_length = sum(map(len, chunks))
    # header's value must be a string
    request_headers = HTTPHeaders({'Content-Length': str(content_length)})

    handler = ChunkedEchoResponseHandler(io_stream)

    # send request and wait for response's headers
    yield handler.write_headers(request_start_line, request_headers)

    assert handler.response_start_line.code == requests.codes.ok
    assert int(handler.response_headers['content-length']) == content_length

    for chunk in chunks:
        yield io_stream.write(chunk)
        response_chunk = yield io_stream.read_bytes(len(chunk))
        assert response_chunk == chunk
Example #12
def test_graceful_close_client_close_half_request(server, io_stream, http_connection, data,
                                                  server_delay):
    '''Sends the request's headers and half of the body and checks that the server closes gracefully.

    If the server receives a request and responds with an error, it should still read
    the request's body. If the client closes the connection after sending only part of
    the request, the server should not fail on the closed connection.

    Args:
        server: an instance of `Server`.
        io_stream: an instance of `tornado.iostream.IOStream`.
        http_connection:
            An instance of `tornado.http1connection.HTTP1Connection` that uses `io_stream`
            as underlying stream.
        data: request's body.
        server_delay: part of request's body to receive before sending error response.
    '''

    yield io_stream.connect(('localhost', server.opts['port']))

    # Start reading server's log until request's access log entry
    request_log_future = server.process.stdout.read_until('access_log_entry')

    code = 403
    response_data = 'response'
    url = '/delayed_error?{}'.format(
        urllib.urlencode({
            'code': code,
            'delay': server_delay,
            'response': response_data,
        })
    )

    start_line = RequestStartLine(method='POST', path=url, version='HTTP/1.1')
    headers = HTTPHeaders({'Content-Length': str(len(data))})
    yield http_connection.write_headers(start_line, headers)
    yield http_connection.write(data[:len(data) // 2])

    # At this point server should respond with error
    # handler will receive the response and close the connection
    handler = ResponseHandler(http_connection)
    yield http_connection.read_response(handler)

    assert handler.start_line.code == code
    assert handler.headers['Content-Length'] == str(len(response_data))
    assert handler.headers['Connection'] == 'Close'
    assert handler.data == response_data

    # Ensure the connection is closed
    with pytest.raises(tornado.iostream.StreamClosedError):
        yield io_stream.write('test')

    request_log = yield request_log_future
    request_log_lines = request_log.split('\n')

    graceful_close_log_start = [
        log_line for log_line in request_log_lines
        if 'gracefully close the connection' in log_line
    ]
    # the graceful close log entry must appear only once
    assert len(graceful_close_log_start) == 1
    # state must include graceful_close
    assert 'graceful_close' in graceful_close_log_start[0]

    received_eof_log_lines = [
        log_line for log_line in request_log_lines
        if 'received new data' in log_line and 'End of file' in log_line
    ]
    # there should be only one log line with receive error
    assert len(received_eof_log_lines) == 1
    # state must include graceful_close
    assert 'graceful_close' in received_eof_log_lines[0]
    # log line must be printed on DEBUG log level as it's not an error
    # during graceful close
    assert (
        'DEBUG' in received_eof_log_lines[0] and
        'ERROR' not in received_eof_log_lines[0]
    )
Example #13
def test_graceful_close_after_headers_server_close(server, io_stream, http_connection, data):
    '''Sends the request's headers and checks that the server closes gracefully.

    If the server receives a request and responds with an error, it should still read
    the request's body. If the client doesn't close the connection and sends the whole
    request, the server should close the connection.

    Args:
        server: an instance of `Server`.
        io_stream: an instance of `tornado.iostream.IOStream`.
        http_connection:
            An instance of `tornado.http1connection.HTTP1Connection` that uses `io_stream`
            as underlying stream.
        data: request's body.
    '''

    yield io_stream.connect(('localhost', server.opts['port']))

    # Start reading server's log until request's access log entry
    request_log_future = server.process.stdout.read_until('access_log_entry')

    code = 403
    response_data = 'response'
    url = '/delayed_error?{}'.format(
        urllib.urlencode({
            'code': code,
            'delay': 0,
            'response': response_data,
        })
    )

    start_line = RequestStartLine(method='POST', path=url, version='HTTP/1.1')
    headers = HTTPHeaders({'Content-Length': str(len(data))})
    yield http_connection.write_headers(start_line, headers)

    # At this point server should respond with error
    handler = DetachedResponseHandler(http_connection)
    yield http_connection.read_response(handler)

    assert handler.start_line.code == code
    assert handler.headers['Content-Length'] == str(len(response_data))
    assert handler.headers['Connection'] == 'Close'

    # Continue sending request's body
    yield io_stream.write(data)

    # read the response's body
    response = yield io_stream.read_bytes(len(response_data))
    assert response == response_data

    # At this point server should close the connection
    with pytest.raises(tornado.iostream.StreamClosedError):
        yield io_stream.read_bytes(1)

    request_log = yield request_log_future
    request_log_lines = request_log.split('\n')

    graceful_close_log_start = [
        log_line for log_line in request_log_lines
        if 'gracefully close the connection' in log_line
    ]
    # the graceful close log entry must appear only once
    assert len(graceful_close_log_start) == 1
    # state must include graceful_close
    assert 'graceful_close' in graceful_close_log_start[0]
Example #14
        def create_qactuar_handler(self, scope: Scope) -> RequestHandler:
            # noinspection PyAbstractClass,PyMethodParameters
            class QactuarHandler(self.handler_type):  # type: ignore
                def __init__(
                    inner_self,
                    tornado_application: Application,
                    tornado_request: HTTPServerRequest,
                    logger: Logger,
                    **kwargs: Dict[Any, Any],
                ) -> None:
                    super().__init__(tornado_application, tornado_request,
                                     **kwargs)
                    inner_self._qactuar_body: List[bytes] = []
                    inner_self._qactuar_headers: Headers = []
                    inner_self._logger = logger

                def write(inner_self, chunk: Union[str, bytes, dict]) -> None:
                    super().write(chunk)
                    inner_self._qactuar_body.append(to_bytes(chunk))

                def add_header(inner_self, name: str,
                               value: _HeaderTypes) -> None:
                    super().add_header(name, value)
                    inner_self._qactuar_headers.append(
                        (to_bytes(name), to_bytes(value)))

                def set_header(inner_self, name: str,
                               value: _HeaderTypes) -> None:
                    super().set_header(name, value)
                    inner_self._qactuar_headers.append(
                        (to_bytes(name), to_bytes(value)))

            if self.request_message and "body" in self.request_message:
                body = self.request_message["body"]
            else:
                body = b""
            if self.request_message and "headers" in self.request_message:
                headers = scope["headers"]
            else:
                headers = []
            headers = HTTPHeaders({
                header[0].decode("utf-8"): header[1].decode("utf-8")
                for header in headers
            })
            request_start_line = RequestStartLine(scope["method"],
                                                  scope["path"],
                                                  scope["http_version"])

            # noinspection PyTypeChecker
            http_connection = HTTP1Connection(QactuarStream(), False)
            http_connection._request_start_line = request_start_line
            http_connection._request_headers = headers

            request = HTTPServerRequest(
                method=scope["method"],
                uri=scope["path"],
                version=scope["http_version"],
                headers=headers,
                body=body,
                host=scope["server"][0],
                connection=http_connection,
                start_line=request_start_line,
            )

            handler = QactuarHandler(Application(), request, self.child_log)
            handler._transforms = []
            handler.application.transforms = []

            return handler
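
Example #14 rebuilds a Tornado request from an ASGI-style scope. A hedged sketch of the scope fields it relies on; the values below are invented for demonstration.

from tornado.httputil import HTTPHeaders, RequestStartLine

scope = {
    'method': 'GET',
    'path': '/health',
    'http_version': '1.1',
    'headers': [(b'host', b'localhost')],
    'server': ('127.0.0.1', 8000),
}
headers = HTTPHeaders({
    name.decode('utf-8'): value.decode('utf-8')
    for name, value in scope['headers']
})
start_line = RequestStartLine(scope['method'], scope['path'], scope['http_version'])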