Example #1
    def test_resizing_header_table(self):
        # We need to encode a substantial number of headers, to populate the
        # header table.
        e = Encoder()
        header_set = [
            (':method', 'GET'),
            (':scheme', 'https'),
            (':path', '/some/path'),
            (':authority', 'www.example.com'),
            ('custom-key', 'custom-value'),
            (
                "user-agent",
                "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:16.0) "
                "Gecko/20100101 Firefox/16.0",
            ),
            (
                "accept",
                "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;"
                "q=0.8",
            ),
            ('X-Lukasa-Test', '88989'),
        ]
        e.encode(header_set, huffman=True)

        # Resize the header table to a size so small that nothing can be in it.
        e.header_table_size = 40
        assert len(e.header_table.dynamic_entries) == 0
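
For context, a minimal hedged round-trip sketch (assuming only hpack's public Encoder/Decoder API): after shrinking header_table_size, the next encode() call prefixes the block with a dynamic table size update that a fresh Decoder consumes transparently.

    from hpack import Decoder, Encoder

    e = Encoder()
    e.header_table_size = 40                        # shrink the dynamic table
    block = e.encode([(':method', 'GET')], huffman=True)

    d = Decoder()
    # The size update at the front of the block is handled by the decoder.
    assert d.decode(block) == [(':method', 'GET')]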
Example #2
    def test_evicting_header_table_objects(self):
        e = Encoder()

        # Set the header table size large enough to include one header.
        e.header_table_size = 66
        header_set = [('a', 'b'), ('long-custom-header', 'longish value')]
        e.encode(header_set)

        assert len(e.header_table.dynamic_entries) == 1
Example #3
    def test_request_examples_with_huffman(self):
        """
        This section shows the same examples as the previous section, but
        using Huffman encoding for the literal values.
        """
        e = Encoder()
        first_header_set = [
            (':method', 'GET',),
            (':scheme', 'http',),
            (':path', '/',),
            (':authority', 'www.example.com'),
        ]
        first_header_table = [(':authority', 'www.example.com')]
        first_result = (
            b'\x82\x86\x84\x41\x8c\xf1\xe3\xc2\xe5\xf2:k\xa0\xab\x90\xf4\xff'
        )

        assert e.encode(first_header_set, huffman=True) == first_result
        assert list(e.header_table.dynamic_entries) == [
            (n.encode('utf-8'), v.encode('utf-8'))
            for n, v in first_header_table
        ]

        second_header_table = [
            ('cache-control', 'no-cache'),
            (':authority', 'www.example.com')
        ]
        second_header_set = [
            (':method', 'GET',),
            (':scheme', 'http',),
            (':path', '/',),
            (':authority', 'www.example.com',),
            ('cache-control', 'no-cache'),
        ]
        second_result = b'\x82\x86\x84\xbeX\x86\xa8\xeb\x10d\x9c\xbf'

        assert e.encode(second_header_set, huffman=True) == second_result
        assert list(e.header_table.dynamic_entries) == [
            (n.encode('utf-8'), v.encode('utf-8'))
            for n, v in second_header_table
        ]

        third_header_set = [
            (':method', 'GET',),
            (':scheme', 'https',),
            (':path', '/index.html',),
            (':authority', 'www.example.com',),
            ('custom-key', 'custom-value'),
        ]
        third_result = (
            b'\x82\x87\x85\xbf'
            b'@\x88%\xa8I\xe9[\xa9}\x7f\x89%\xa8I\xe9[\xb8\xe8\xb4\xbf'
        )

        assert e.encode(third_header_set, huffman=True) == third_result
        assert len(e.header_table.dynamic_entries) == 3
Example #4
    def test_request_examples_without_huffman(self):
        """
        This section shows several consecutive header sets, corresponding to
        HTTP requests, on the same connection.
        """
        e = Encoder()
        first_header_set = [
            (':method', 'GET',),
            (':scheme', 'http',),
            (':path', '/',),
            (':authority', 'www.example.com'),
        ]
        # We should have :authority in first_header_table since we index it
        first_header_table = [(':authority', 'www.example.com')]
        first_result = b'\x82\x86\x84\x41\x0fwww.example.com'

        assert e.encode(first_header_set, huffman=False) == first_result
        assert list(e.header_table.dynamic_entries) == [
            (n.encode('utf-8'), v.encode('utf-8'))
            for n, v in first_header_table
        ]

        second_header_set = [
            (':method', 'GET',),
            (':scheme', 'http',),
            (':path', '/',),
            (':authority', 'www.example.com',),
            ('cache-control', 'no-cache'),
        ]
        second_header_table = [
            ('cache-control', 'no-cache'),
            (':authority', 'www.example.com')
        ]
        second_result = b'\x82\x86\x84\xbeX\x08no-cache'

        assert e.encode(second_header_set, huffman=False) == second_result
        assert list(e.header_table.dynamic_entries) == [
            (n.encode('utf-8'), v.encode('utf-8'))
            for n, v in second_header_table
        ]

        third_header_set = [
            (':method', 'GET',),
            (':scheme', 'https',),
            (':path', '/index.html',),
            (':authority', 'www.example.com',),
            ('custom-key', 'custom-value'),
        ]
        third_result = (
            b'\x82\x87\x85\xbf@\ncustom-key\x0ccustom-value'
        )

        assert e.encode(third_header_set, huffman=False) == third_result
        # Don't check the header table here; it's too complex to be reliable.
        # Check its length, though.
        assert len(e.header_table.dynamic_entries) == 3
Example #5
    def save(self):
        encoder = Encoder()
        payload = encoder.encode(self)
        # self.payload = payload
        self.length = len(payload)

        base = super().save()
        logger.debug(base + payload)
        return base + payload
Example #6
    def test_headers_generator(self):
        e = Encoder()

        def headers_generator():
            return (("k" + str(i), "v" + str(i)) for i in range(3))

        header_set = headers_generator()
        out = e.encode(header_set)
        assert Decoder().decode(out) == list(headers_generator())
Example #7
File: serve.py  Project: SuhwanCha/HTTP2
    def mkdata(self, data):
        e = Encoder()
        data_length = len(data)
        header = {':content-length': data_length, ':status': 200}
        header = e.encode(header)
        length = len(header)
        send_data = (length.to_bytes(3, byteorder='big') + b'\x01' + b'\x04'
                     + b'\x00\x00\x00\x01' + header)
        print(send_data)
        return send_data
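
Two things to note about this snippet: hpack generally expects header names and values as str or bytes (the integer values above may not encode cleanly), and content-length is an ordinary header rather than a pseudo-header. A hedged alternative sketch that builds the same HEADERS frame with hyperframe (an assumption; this project packs the 9-byte frame header by hand) instead of manual byte concatenation:

    from hpack import Encoder
    from hyperframe.frame import HeadersFrame

    def mkdata_with_hyperframe(data):
        block = Encoder().encode([
            (':status', '200'),                  # pseudo-header first
            ('content-length', str(len(data))),  # values passed as strings
        ])
        frame = HeadersFrame(1)                  # stream id 1, as above
        frame.data = block
        frame.flags.add('END_HEADERS')           # the hand-written 0x04 flag byte
        return frame.serialize()                 # 9-byte header + payload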
Example #8
    def test_resizing_header_table_sends_multiple_updates(self):
        e = Encoder()

        e.header_table_size = 40
        e.header_table_size = 100
        e.header_table_size = 40

        header_set = [(':method', 'GET')]
        out = e.encode(header_set, huffman=True)
        assert out == b'\x3F\x09\x3F\x45\x3F\x09\x82'
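
A hedged byte-by-byte reading of the expected output, per RFC 7541's 5-bit prefix integer coding: each 0x3F starts a dynamic table size update (bit pattern 001xxxxx) with all prefix bits set, so the value continues in the following byte, and the trailing 0x82 is an indexed header field referring to static table entry 2 (:method: GET).

    assert (0x3F & 0x1F) + 0x09 == 40      # first update: resize to 40
    assert (0x3F & 0x1F) + 0x45 == 100     # second update: resize to 100
    assert (0x3F & 0x1F) + 0x09 == 40      # third update: back to 40
    assert 0x82 & 0x7F == 2                # indexed field, static entry 2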
Example #9
    def test_indexed_header_field_from_static_table(self):
        e = Encoder()
        e.header_table_size = 0
        header_set = {':method': 'GET'}
        result = b'\x82'

        # Make sure we don't emit an encoding context update.
        e.header_table.resized = False

        assert e.encode(header_set, huffman=False) == result
        assert list(e.header_table.dynamic_entries) == []
Example #10
    def test_indexed_header_field(self):
        """
        The header field representation uses an indexed header field, from
        the static table.
        """
        e = Encoder()
        header_set = {':method': 'GET'}
        result = b'\x82'

        assert e.encode(header_set, huffman=False) == result
        assert list(e.header_table.dynamic_entries) == []
Example #11
    def test_setting_table_size_to_the_same_does_nothing(self):
        e = Encoder()

        # Set the header table size to the default.
        e.header_table_size = 4096

        # Now encode a header set. Just a small one, with a well-defined
        # output.
        header_set = [(':method', 'GET')]
        out = e.encode(header_set, huffman=True)

        assert out == b'\x82'
Example #12
    def save(self):

        encoder = Encoder()
        payload = encoder.encode(self)
        self.length = len(payload)

        decoder = Decoder()
        check = decoder.decode(payload)
        logger.info('payload of the header is {}'.format(check))

        base = super().save()
        return base + payload
Example #13
    def test_resizing_header_table_sends_context_update(self):
        e = Encoder()

        # Resize the header table to a size so small that nothing can be in it.
        e.header_table_size = 40

        # Now, encode a header set. Just a small one, with a well-defined
        # output.
        header_set = [(':method', 'GET')]
        out = e.encode(header_set, huffman=True)

        assert out == b'?\t\x82'
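
The expected bytes here are just the escaped form of the pattern seen in the other resize tests: '?' is 0x3F and '\t' is 0x09, i.e. a size update to 40 followed by the indexed :method: GET field.

    assert b'?\t\x82' == b'\x3F\x09\x82'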
Example #14
    def test_indexed_literal_header_field_with_indexing(self):
        """
        The header field representation uses an indexed name and a literal
        value and performs incremental indexing.
        """
        e = Encoder()
        header_set = {':path': '/sample/path'}
        result = b'\x44\x0c/sample/path'

        assert e.encode(header_set, huffman=False) == result
        assert list(e.header_table.dynamic_entries) == [
            (n.encode('utf-8'), v.encode('utf-8'))
            for n, v in header_set.items()
        ]
Example #15
    def test_literal_header_field_with_indexing(self):
        """
        The header field representation uses a literal name and a literal
        value.
        """
        e = Encoder()
        header_set = {'custom-key': 'custom-header'}
        result = b'\x40\x0acustom-key\x0dcustom-header'

        assert e.encode(header_set, huffman=False) == result
        assert list(e.header_table.dynamic_entries) == [
            (n.encode('utf-8'), v.encode('utf-8'))
            for n, v in header_set.items()
        ]
Example #16
 def test_sensitive_headers(self):
     """
     Test encoding header values
     """
     e = Encoder()
     result = (b'\x82\x14\x88\x63\xa1\xa9' +
               b'\x32\x08\x73\xd0\xc7\x10' +
               b'\x87\x25\xa8\x49\xe9\xea' +
               b'\x5f\x5f\x89\x41\x6a\x41' +
               b'\x92\x6e\xe5\x35\x52\x9f')
     header_set = [
         (':method', 'GET', True),
         (':path', '/jimiscool/', True),
         ('customkey', 'sensitiveinfo', True),
     ]
     assert e.encode(header_set, huffman=True) == result
Example #17
    def test_can_encode_a_story_with_huffman(self, raw_story):
        d = Decoder()
        e = Encoder()

        for case in raw_story['cases']:
            # The input headers are a list of dicts, which is annoying.
            input_headers = [
                (item[0], item[1])
                for header in case['headers']
                for item in header.items()
            ]

            encoded = e.encode(input_headers, huffman=True)
            decoded_headers = d.decode(encoded)

            assert input_headers == decoded_headers
Example #18
    def test_resizing_header_table_to_same_size_ignored(self):
        e = Encoder()

        # These size changes should be ignored
        e.header_table_size = 4096
        e.header_table_size = 4096
        e.header_table_size = 4096

        # These size changes should be encoded
        e.header_table_size = 40
        e.header_table_size = 100
        e.header_table_size = 40

        header_set = [(':method', 'GET')]
        out = e.encode(header_set, huffman=True)
        assert out == b'\x3F\x09\x3F\x45\x3F\x09\x82'
Example #19
 def test_sensitive_headers_with_header_tuples(self):
     """
     A header field stored in a NeverIndexedHeaderTuple emits a
     representation that forbids indexing.
     """
     e = Encoder()
     result = (b'\x82\x14\x88\x63\xa1\xa9' +
               b'\x32\x08\x73\xd0\xc7\x10' +
               b'\x87\x25\xa8\x49\xe9\xea' +
               b'\x5f\x5f\x89\x41\x6a\x41' +
               b'\x92\x6e\xe5\x35\x52\x9f')
     header_set = [
         NeverIndexedHeaderTuple(':method', 'GET'),
         NeverIndexedHeaderTuple(':path', '/jimiscool/'),
         NeverIndexedHeaderTuple('customkey', 'sensitiveinfo'),
     ]
     assert e.encode(header_set, huffman=True) == result
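
A hedged round-trip sketch (assuming NeverIndexedHeaderTuple is importable from the hpack package, as the test above implies): the never-indexed marker survives decoding, so an intermediary that re-encodes the headers keeps the protection.

    from hpack import Decoder, Encoder, NeverIndexedHeaderTuple

    e = Encoder()
    d = Decoder()
    block = e.encode([NeverIndexedHeaderTuple('authorization', 'secret-token')])
    (header,) = d.decode(block)
    assert isinstance(header, NeverIndexedHeaderTuple)   # still marked never-indexed
    assert header == ('authorization', 'secret-token')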
Example #20
class Stream:
    def __init__(self, connection, stream_id):
        self.connection = connection
        self.stream_id = stream_id
        self.state = StreamState.IDLE
        self.header_encoder = Encoder()

    def process(self, frame):
        if isinstance(frame, HeadersFrame):
            self.state = StreamState.OPEN

            if 'END_HEADERS' in frame.flags:
                data = str.encode(self.connection.fingerprint)

                headers = {
                    'content-length': len(data),
                    'content-type': 'text/html; charset=UTF-8',
                    ':status': 200,
                    'access-control-allow-origin': '*'
                }
                headers_frame = HeadersFrame(
                    self.stream_id,
                    self.header_encoder.encode(headers),
                    flags=['END_HEADERS'])
                self.send_headers(headers_frame)

                data_frame = DataFrame(self.stream_id,
                                       data,
                                       flags=['END_STREAM'])
                self.send_data(data_frame)
                self.close_connection()

        elif isinstance(frame, RstStreamFrame) or isinstance(
                frame, GoAwayFrame):
            self.close_connection()

    def send_headers(self, headers):
        self.connection.send_frame(headers)

    def send_data(self, data):
        self.connection.send_frame(data)

    def close_connection(self):
        self.state = StreamState.CLOSED
        self.connection.close()
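
One caveat about the headers dict above: hpack generally expects names and values as str or bytes, so integer values such as the content length and status are safer stringified before encoding. A minimal hedged illustration (body is a hypothetical response payload):

    from hpack import Encoder

    body = b'<html>fingerprint</html>'
    headers = {
        ':status': '200',                       # stringified status
        'content-length': str(len(body)),       # stringified length
        'content-type': 'text/html; charset=UTF-8',
        'access-control-allow-origin': '*',
    }
    block = Encoder().encode(headers)           # dicts are accepted; ':' keys are emitted first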
Example #21
    def test_can_encode_a_story_no_huffman(self, raw_story):
        d = Decoder()
        e = Encoder()

        for case in raw_story['cases']:
            # The input headers are a list of dicts, which is annoying.
            input_headers = [
                (item[0], item[1])
                for header in case['headers']
                for item in header.items()
            ]

            encoded = e.encode(input_headers, huffman=False)
            decoded_headers = d.decode(encoded)

            assert input_headers == decoded_headers
            assert all(
                isinstance(header, HeaderTuple) for header in decoded_headers
            )
Example #22
class Demo(object):
    def __init__(self):
        self.encoder = Encoder()
        self.decoder = Decoder()

    def run(self, headers):
        origin_len = 0
        encoded_len = 0
        print "=" * 16
        for header in headers:
            header_tuple = header_dict_to_tuple(header)
            encoded = self.encoder.encode([header_tuple])

            encoded_len += len(encoded)
            origin_len += len(header_tuple[0]) + len(header_tuple[1])
            match = self.decoder.header_table.search(header_tuple[0], header_tuple[1])

            print "{0}=>{1}".format(header, binascii.hexlify(encoded), translate_match(match))
            print translate_match(match)

            curr_state = None
            length = 0
            for b in encoded:
                one_byte_data = bin(struct.unpack("B", b)[0])[2:].zfill(8)
                curr_state, content, length = translate_byte(one_byte_data, match, curr_state, length)
                if content:
                    print "{0} ({1})".format(one_byte_data, content)

            self.decoder.decode(encoded)
            print
        print "Decompressed from {0} to {1}".format(origin_len, encoded_len)
        print "=" * 16

    def pretty_print_table(self, table):
        for (k, v) in table.dynamic_entries:
            print "{0}=>{1}".format(k, v)

    def tables(self):
        self.pretty_print_table(self.encoder.header_table)
Example #23
    def test_ordering_applies_to_encoding(self, special_keys, boring_keys):
        """
        When encoding a dictionary the special keys all appear first.
        """
        def _prepend_colon(k):
            if isinstance(k, str):
                return ':' + k
            else:
                return b':' + k

        special_keys = set(map(_prepend_colon, special_keys))
        input_dict = {
            k: b'testval' for k in itertools.chain(
                special_keys,
                boring_keys
            )
        }
        e = Encoder()
        d = Decoder()
        encoded = e.encode(input_dict)
        decoded = iter(d.decode(encoded, raw=True))

        received_special = set()
        received_boring = set()
        expected_special = set(map(_to_bytes, special_keys))
        expected_boring = set(map(_to_bytes, boring_keys))

        for _ in special_keys:
            k, _ = next(decoded)
            received_special.add(k)
        for _ in boring_keys:
            k, _ = next(decoded)
            received_boring.add(k)

        assert expected_special == received_special
        assert expected_boring == received_boring
Example #24
def make_frames(path):  # placeholder name; the original identifier is censored in the source
    settings = []
    f = SettingsFrame(0)
    f.settings = {
        f.HEADER_TABLE_SIZE: 0xff,
        f.ENABLE_PUSH: 0,
        f.MAX_CONCURRENT_STREAMS: 5,
        f.INITIAL_WINDOW_SIZE: 0xff,
        f.MAX_HEADER_LIST_SIZE: 0xff
    }
    # settings.append(f.serialize())
    # f = WindowUpdateFrame(1)
    settings.append(f.serialize())
    f = HeadersFrame(1)
    f.flags.add('END_STREAM')
    f.flags.add('END_HEADERS')
    header_data = [(':method', 'GET'), (':scheme', 'http'),
                   (':path', '/' + path), (':authority', '127.0.0.1:8080'),
                   ('cookie', 'v'), ('accept', '*')]
    ee = Encoder()
    f.data = ee.encode(header_data)
    settings.append(f.serialize())
    data = b''.join(settings)
    return quote(data)
Example #25
class Http2Worker(HttpWorker):
    version = "2"

    def __init__(self, logger, ip_manager, config, ssl_sock, close_cb,
                 retry_task_cb, idle_cb, log_debug_data):
        super(Http2Worker,
              self).__init__(logger, ip_manager, config, ssl_sock, close_cb,
                             retry_task_cb, idle_cb, log_debug_data)

        self.network_buffer_size = 65535

        # Google's HTTP/2 timeout is 4 minutes.
        self.ssl_sock.settimeout(240)
        self._sock = BufferedSocket(ssl_sock, self.network_buffer_size)

        self.next_stream_id = 1
        self.streams = {}
        self.last_ping_time = time.time()
        self.continue_timeout = 0

        # Count of PINGs that have not been ACKed yet:
        # incremented when a ping is sent,
        # decremented when a ping ACK is received.
        # While this is not 0, don't accept new requests.
        self.ping_on_way = 0
        self.accept_task = False

        # request_lock
        self.request_lock = threading.Lock()

        # Every outgoing frame must be put on this queue; send_loop then sends
        # it. A frame may only be queued if both the stream window and the
        # connection window allow it; any data frame blocked by the connection
        # window goes to self.blocked_send_frames instead.
        self.send_queue = Queue.Queue()
        self.encoder = Encoder()
        self.decoder = Decoder()

        # Data frames allowed by their stream window but blocked by the
        # connection window are buffered here and sent once the connection
        # window opens.
        self.blocked_send_frames = []

        # Settings used on this HTTP/2 connection; they are sent to the remote
        # peer in a SETTINGS frame.
        self.local_settings = {
            SettingsFrame.INITIAL_WINDOW_SIZE: 16 * 1024 * 1024,
            SettingsFrame.SETTINGS_MAX_FRAME_SIZE: 256 * 1024
        }
        self.local_connection_initial_windows = 32 * 1024 * 1024
        self.local_window_manager = FlowControlManager(
            self.local_connection_initial_windows)

        # Changed by the server via SETTINGS frames.
        self.remote_settings = {
            SettingsFrame.INITIAL_WINDOW_SIZE: DEFAULT_WINDOW_SIZE,
            SettingsFrame.SETTINGS_MAX_FRAME_SIZE: DEFAULT_MAX_FRAME,
            SettingsFrame.MAX_CONCURRENT_STREAMS: 100
        }

        #self.remote_window_size = DEFAULT_WINDOW_SIZE
        self.remote_window_size = 32 * 1024 * 1024

        # Send the SETTINGS frame before accepting tasks.
        self._send_preamble()

        threading.Thread(target=self.send_loop).start()
        threading.Thread(target=self.recv_loop).start()

    # export api
    def request(self, task):
        if not self.keep_running:
            # race condition
            self.retry_task_cb(task)
            return

        if len(self.streams) > self.config.http2_max_concurrent:
            self.accept_task = False

        task.set_state("h2_req")
        self.request_task(task)

    def encode_header(self, headers):
        return self.encoder.encode(headers)

    def request_task(self, task):
        with self.request_lock:
            # create stream to process task
            stream_id = self.next_stream_id

            # HTTP/2 clients use odd stream IDs.
            self.next_stream_id += 2

            stream = Stream(
                self.logger, self.config, self, self.ip, stream_id, task,
                self._send_cb, self._close_stream_cb, self.encode_header,
                self.decoder,
                FlowControlManager(
                    self.local_settings[SettingsFrame.INITIAL_WINDOW_SIZE]),
                self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE],
                self.remote_settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE])
            self.streams[stream_id] = stream
            stream.start_request()

    def send_loop(self):
        while self.keep_running:
            frame = self.send_queue.get(True)
            if not frame:
                # A None frame means exit.
                break

            if self.config.http2_show_debug:
                self.logger.debug("%s Send:%s", self.ip, str(frame))
            data = frame.serialize()
            try:
                self._sock.send(data, flush=False)
                # Don't flush for small payloads; this reduces the number of
                # send() calls.

                if self.send_queue._qsize():
                    continue

                # wait for payload frame
                time.sleep(0.01)
                # Combine the header and payload into one TCP packet.
                if not self.send_queue._qsize():
                    self._sock.flush()

                self.last_send_time = time.time()
            except socket.error as e:
                if e.errno not in (errno.EPIPE, errno.ECONNRESET):
                    self.logger.warn("%s http2 send fail:%r", self.ip, e)
                else:
                    self.logger.exception("send error:%r", e)

                self.close("send fail:%r" % e)
            except Exception as e:
                self.logger.debug("http2 %s send error:%r", self.ip, e)
                self.close("send fail:%r" % e)

    def recv_loop(self):
        while self.keep_running:
            try:
                self._consume_single_frame()
            except Exception as e:
                self.logger.exception("recv fail:%r", e)
                self.close("recv fail:%r" % e)

    def get_rtt_rate(self):
        return self.rtt + len(self.streams) * 3000

    def close(self, reason="conn close"):
        self.keep_running = False
        self.accept_task = False
        # Notify the send loop to exit.
        # This function may also be called from outside http2,
        # e.g. when gae_proxy finds that the appid or IP is wrong.
        self.send_queue.put(None)

        for stream in self.streams.values():
            if stream.task.responsed:
                # The response has already been sent to the client,
                # so this task can't be retried.
                stream.close(reason=reason)
            else:
                self.retry_task_cb(stream.task)
        self.streams = {}
        super(Http2Worker, self).close(reason)

    def send_ping(self):
        p = PingFrame(0)
        p.opaque_data = struct.pack("!d", time.time())
        self.send_queue.put(p)
        self.last_ping_time = time.time()
        self.ping_on_way += 1

    def _send_preamble(self):
        self.send_queue.put(RawFrame(b'PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n'))

        f = SettingsFrame(0)
        f.settings[SettingsFrame.ENABLE_PUSH] = 0
        f.settings[SettingsFrame.INITIAL_WINDOW_SIZE] = self.local_settings[
            SettingsFrame.INITIAL_WINDOW_SIZE]
        f.settings[
            SettingsFrame.SETTINGS_MAX_FRAME_SIZE] = self.local_settings[
                SettingsFrame.SETTINGS_MAX_FRAME_SIZE]
        self._send_cb(f)

        # update local connection windows size
        f = WindowUpdateFrame(0)
        f.window_increment = self.local_connection_initial_windows - DEFAULT_WINDOW_SIZE
        self._send_cb(f)

    def increase_remote_window_size(self, inc_size):
        # check and send blocked frames if window allow
        self.remote_window_size += inc_size
        #self.logger.debug("%s increase send win:%d result:%d", self.ip, inc_size, self.remote_window_size)
        while len(self.blocked_send_frames):
            frame = self.blocked_send_frames[0]
            if len(frame.data) > self.remote_window_size:
                return

            self.remote_window_size -= len(frame.data)
            self.send_queue.put(frame)
            self.blocked_send_frames.pop(0)

        if self.keep_running and \
                self.accept_task == False and \
                len(self.streams) < self.config.http2_max_concurrent and \
                self.remote_window_size > 10000:
            self.accept_task = True
            self.idle_cb()

    def _send_cb(self, frame):
        # May be called by a stream. Data frames are appended to
        # blocked_send_frames if the connection window does not allow them.
        if frame.type == DataFrame.type:
            if len(frame.data) > self.remote_window_size:
                self.blocked_send_frames.append(frame)
                self.accept_task = False
                return
            else:
                self.remote_window_size -= len(frame.data)
                self.send_queue.put(frame)
        else:
            self.send_queue.put(frame)

    def _close_stream_cb(self, stream_id, reason):
        # Called by a stream to remove itself from the streams list.
        # self.logger.debug("%s close stream:%d %s", self.ssl_sock.ip, stream_id, reason)
        try:
            del self.streams[stream_id]
        except KeyError:
            pass

        if self.keep_running and \
                len(self.streams) < self.config.http2_max_concurrent and \
                self.remote_window_size > 10000:
            self.accept_task = True
            self.idle_cb()

        self.processed_tasks += 1

    def _consume_single_frame(self):
        try:
            header = self._sock.recv(9)
        except Exception as e:
            self.logger.debug("%s _consume_single_frame:%r, inactive time:%d",
                              self.ip, e,
                              time.time() - self.last_recv_time)
            self.close("ConnectionReset:%r" % e)
            return
        self.last_recv_time = time.time()

        # Parse the header. We can use the returned memoryview directly here.
        frame, length = Frame.parse_frame_header(header)

        if length > FRAME_MAX_ALLOWED_LEN:
            self.logger.error(
                "%s Frame size exceeded on stream %d (received: %d, max: %d)",
                self.ip, frame.stream_id, length, FRAME_MAX_ALLOWED_LEN)
            # self._send_rst_frame(frame.stream_id, 6) # 6 = FRAME_SIZE_ERROR

        try:
            data = self._recv_payload(length)
        except Exception as e:
            self.close("ConnectionReset:%r" % e)
            return

        self._consume_frame_payload(frame, data)

    def _recv_payload(self, length):
        if not length:
            return memoryview(b'')

        buffer = bytearray(length)
        buffer_view = memoryview(buffer)
        index = 0
        data_length = -1

        # _sock.recv(length) might not return all of the data when length is
        # large, so read from the socket repeatedly until everything arrives.
        while length and data_length:
            data = self._sock.recv(length)
            self.last_recv_time = time.time()
            data_length = len(data)
            end = index + data_length
            buffer_view[index:end] = data[:]
            length -= data_length
            index = end

        return buffer_view[:end]

    def _consume_frame_payload(self, frame, data):
        frame.parse_body(data)

        if self.config.http2_show_debug:
            self.logger.debug("%s Recv:%s", self.ip, str(frame))

        # Maintain our flow control window. We do this by delegating to the
        # chosen WindowManager.
        if frame.type == DataFrame.type:

            size = frame.flow_controlled_length
            increment = self.local_window_manager._handle_frame(size)

            if increment < 0:
                self.logger.warn("increment:%d", increment)
            elif increment:
                #self.logger.debug("%s frame size:%d increase win:%d", self.ip, size, increment)
                w = WindowUpdateFrame(0)
                w.window_increment = increment
                self._send_cb(w)

        elif frame.type == PushPromiseFrame.type:
            self.logger.error(
                "%s receive push frame",
                self.ip,
            )

        # Work out to whom this frame should go.
        if frame.stream_id != 0:
            try:
                stream = self.streams[frame.stream_id]
                stream.receive_frame(frame)
            except KeyError as e:
                if frame.type not in [WindowUpdateFrame.type]:
                    self.logger.exception(
                        "%s Unexpected stream identifier %d, frame.type:%s e:%r",
                        self.ip, frame.stream_id, frame, e)
        else:
            self.receive_frame(frame)

    def receive_frame(self, frame):
        if frame.type == WindowUpdateFrame.type:
            # self.logger.debug("WindowUpdateFrame %d", frame.window_increment)
            self.increase_remote_window_size(frame.window_increment)

        elif frame.type == PingFrame.type:
            if 'ACK' in frame.flags:
                ping_time = struct.unpack("!d", frame.opaque_data)[0]
                time_now = time.time()
                rtt = (time_now - ping_time) * 1000
                if rtt < 0:
                    self.logger.error("rtt:%f ping_time:%f now:%f", rtt,
                                      ping_time, time_now)
                self.rtt = rtt
                self.ping_on_way -= 1
                #self.logger.debug("RTT:%d, on_way:%d", self.rtt, self.ping_on_way)
                if self.keep_running and self.ping_on_way == 0:
                    self.accept_task = True
            else:
                # The spec requires us to reply with PING+ACK and identical data.
                p = PingFrame(0)
                p.flags.add('ACK')
                p.opaque_data = frame.opaque_data
                self._send_cb(p)

        elif frame.type == SettingsFrame.type:
            if 'ACK' not in frame.flags:
                # send ACK as soon as possible
                f = SettingsFrame(0)
                f.flags.add('ACK')
                self._send_cb(f)

                # This may unblock data frames previously held back by the
                # remote window.
                self._update_settings(frame)
            else:
                self.accept_task = True
                self.idle_cb()

        elif frame.type == GoAwayFrame.type:
            # If we get GoAway with error code zero, we are doing a graceful
            # shutdown and all is well. Otherwise, throw an exception.

            # If an error occurred, try to read the error description from the
            # code registry; otherwise use the frame's additional data.
            error_string = frame._extra_info()
            time_cost = time.time() - self.last_recv_time
            if frame.additional_data != "session_timed_out":
                self.logger.warn("goaway:%s, t:%d", error_string, time_cost)

            self.close("GoAway:%s inactive time:%d" %
                       (error_string, time_cost))

        elif frame.type == BlockedFrame.type:
            self.logger.warn("%s get BlockedFrame", self.ip)
        elif frame.type in FRAMES:
            # This frame isn't valid at this point.
            #raise ValueError("Unexpected frame %s." % frame)
            self.logger.error("%s Unexpected frame %s.", self.ip, frame)
        else:  # pragma: no cover
            # Unexpected frames belong to extensions. Just drop it on the
            # floor, but log so that users know that something happened.
            self.logger.error("%s Received unknown frame, type %d", self.ip,
                              frame.type)

    def _update_settings(self, frame):
        if SettingsFrame.HEADER_TABLE_SIZE in frame.settings:
            new_size = frame.settings[SettingsFrame.HEADER_TABLE_SIZE]

            self.remote_settings[SettingsFrame.HEADER_TABLE_SIZE] = new_size
            #self.encoder.header_table_size = new_size

        if SettingsFrame.INITIAL_WINDOW_SIZE in frame.settings:
            newsize = frame.settings[SettingsFrame.INITIAL_WINDOW_SIZE]
            oldsize = self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE]
            delta = newsize - oldsize

            for stream in self.streams.values():
                stream.remote_window_size += delta

            self.remote_settings[SettingsFrame.INITIAL_WINDOW_SIZE] = newsize

        if SettingsFrame.SETTINGS_MAX_FRAME_SIZE in frame.settings:
            new_size = frame.settings[SettingsFrame.SETTINGS_MAX_FRAME_SIZE]
            if not (FRAME_MAX_LEN <= new_size <= FRAME_MAX_ALLOWED_LEN):
                self.logger.error(
                    "%s Frame size %d is outside of allowed range", self.ip,
                    new_size)

                # Tear the connection down with error code PROTOCOL_ERROR
                self.close("bad max frame size")
                #error_string = ("Advertised frame size %d is outside of range" % (new_size))
                #raise ConnectionError(error_string)
                return

            self.remote_settings[
                SettingsFrame.SETTINGS_MAX_FRAME_SIZE] = new_size

            for stream in self.streams.values():
                stream.max_frame_size = new_size

    def get_trace(self):
        out_list = []
        out_list.append(" continue_timeout:%d" % self.continue_timeout)
        out_list.append(" processed:%d" % self.processed_tasks)
        out_list.append(" h2.stream_num:%d" % len(self.streams))
        out_list.append(" sni:%s, host:%s" %
                        (self.ssl_sock.sni, self.ssl_sock.host))
        return ",".join(out_list)

    def check_active(self, now):
        if not self.keep_running or len(self.streams) == 0:
            return

        for sid in self.streams.keys():
            try:
                stream = self.streams[sid]
                stream.check_timeout(now)
            except:
                pass

        if len(self.streams) > 0 and\
                now - self.last_send_time > 3 and \
                now - self.last_ping_time > self.config.http2_ping_min_interval:

            if self.ping_on_way > 0:
                self.close("active timeout")
                return

            self.send_ping()
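
A hedged sketch of what the commented-out line in _update_settings (self.encoder.header_table_size = new_size) would accomplish: capping the local encoder's dynamic table at the peer's advertised SETTINGS_HEADER_TABLE_SIZE, after which hpack emits the required dynamic table size update at the start of the next encoded header block.

    from hpack import Encoder

    def apply_remote_header_table_size(encoder, new_size):
        # Hypothetical helper: never exceed the peer's limit; shrinking is always allowed.
        encoder.header_table_size = min(encoder.header_table_size, new_size)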