def read_frames(self):
    '''
    Read frames from the transport and process them. Some transports may
    choose to do this in the background, in several threads, and so on.
    '''
    # It's possible in a concurrent environment that our transport handle
    # has gone away, so handle that cleanly.
    # TODO: Consider moving this block into Translator base class. In many
    # ways it belongs there. One of the problems though is that this is
    # essentially the read loop. Each Transport has different rules for
    # how to kick this off, and in the case of gevent, this is how a
    # blocking call to read from the socket is kicked off.
    if self._transport is None:
        return

    # Send a heartbeat (if needed)
    self._channels[0].send_heartbeat()

    data = self._transport.read(self._heartbeat)
    if data is None:
        return

    reader = Reader(data)
    p_channels = set()

    try:
        for frame in Frame.read_frames(reader):
            if self._debug > 1:
                self.logger.debug("READ: %s", frame)
            self._frames_read += 1
            ch = self.channel(frame.channel_id)
            ch.buffer_frame(frame)
            p_channels.add(ch)
    except Frame.FrameError as e:
        # Frame error in the peer, disconnect
        self.close(reply_code=501,
                   reply_text='frame error from %s : %s' % (
                       self._host, str(e)),
                   class_id=0, method_id=0, disconnect=True)
        raise ConnectionClosed("connection is closed: %s : %s" %
                               (self._close_info['reply_code'],
                                self._close_info['reply_text']))

    self._transport.process_channels(p_channels)

    # HACK: read the buffer contents and re-buffer. Would prefer to pass
    # buffer back, but there's no good way of asking the total size of the
    # buffer, comparing to tell(), and then re-buffering. There's also no
    # ability to clear the buffer up to the current position. It would be
    # awesome if we could free that memory without a new allocation.
    if reader.tell() < len(data):
        self._transport.buffer(data[reader.tell():])

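# Illustrative sketch (not part of the library): the re-buffering HACK above
# boils down to "parse what you can, keep the unparsed tail for the next
# read". The split_complete_frames helper and the 4-byte length-prefixed
# frame layout used here are hypothetical stand-ins for Frame.read_frames
# and the transport buffer; only the pattern of comparing tell() against
# len(data) and re-buffering the remainder is taken from the code above.
import struct


def split_complete_frames(data):
    '''
    Return (frames, leftover), where each frame is assumed to be a 4-byte
    big-endian length prefix followed by a payload. A trailing partial
    frame is returned untouched so the caller can re-buffer it.
    '''
    pos = 0
    frames = []
    while pos + 4 <= len(data):
        (size,) = struct.unpack_from('>I', data, pos)
        if pos + 4 + size > len(data):
            break  # partial frame: stop and let the caller re-buffer it
        frames.append(data[pos + 4:pos + 4 + size])
        pos += 4 + size
    return frames, data[pos:]


buffered = b''
for chunk in (b'\x00\x00\x00\x03abc\x00\x00', b'\x00\x02de'):
    data = buffered + chunk          # same effect as transport.buffer(...)
    frames, buffered = split_complete_frames(data)
    print(frames, buffered)
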
def test_write_frame_slow_for_standard_properties(self):
    HeaderFrame.DEFAULT_PROPERTIES = False
    bit_field = 0
    properties = {}
    now = datetime.utcfromtimestamp(
        long(time.mktime(datetime.now().timetuple())))
    for pname, ptype, reader, writer, mask in HeaderFrame.PROPERTIES:
        bit_field |= mask
        if ptype == 'shortstr':
            properties[pname] = pname
        elif ptype == 'octet':
            properties[pname] = 42
        elif ptype == 'timestamp':
            properties[pname] = now
        elif ptype == 'table':
            properties[pname] = {'foo': 'bar'}

    frame = HeaderFrame(42, 5, 6, 7, properties)
    buf = bytearray()
    frame.write_frame(buf)
    HeaderFrame.DEFAULT_PROPERTIES = True

    reader = Reader(buf)
    assert_equals(2, reader.read_octet())
    assert_equals(42, reader.read_short())
    size = reader.read_long()
    start_pos = reader.tell()
    assert_equals(5, reader.read_short())
    assert_equals(6, reader.read_short())
    assert_equals(7, reader.read_longlong())
    assert_equals(0b1111111111111100, reader.read_short())

    for pname, ptype, rfunc, wfunc, mask in HeaderFrame.PROPERTIES:
        if ptype == 'shortstr':
            assert_equals(pname, reader.read_shortstr())
        elif ptype == 'octet':
            assert_equals(42, reader.read_octet())
        elif ptype == 'timestamp':
            assert_equals(now, reader.read_timestamp())
        elif ptype == 'table':
            assert_equals({'foo': 'bar'}, reader.read_table())

    end_pos = reader.tell()
    assert_equals(size, end_pos - start_pos)
    assert_equals(0xce, reader.read_octet())

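# Illustrative sketch (not from the library): the 0b1111111111111100 value
# asserted above is what you get by OR-ing one mask per standard header
# property, with the first property assigned the most significant bit of the
# 16-bit property-flags short. The mask list below is a hypothetical
# stand-in for the masks carried in HeaderFrame.PROPERTIES.
masks = [1 << bit for bit in range(15, 1, -1)]   # 14 properties, MSB first
flag_field = 0
for mask in masks:
    flag_field |= mask
assert len(masks) == 14
assert flag_field == 0b1111111111111100
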
def test_write_frame(self):
    args = mock()
    expect(args.buffer).returns('hello')

    frame = MethodFrame(42, 5, 6, args)

    buf = bytearray()
    frame.write_frame(buf)

    reader = Reader(buf)
    assert_equals(1, reader.read_octet())
    assert_equals(42, reader.read_short())
    size = reader.read_long()
    start_pos = reader.tell()
    assert_equals(5, reader.read_short())
    assert_equals(6, reader.read_short())
    args_pos = reader.tell()
    assert_equals('hello', reader.read(size - (args_pos - start_pos)))
    assert_equals(0xce, reader.read_octet())

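# Illustrative sketch (not from the library): the assertions above walk the
# method-frame wire layout that MethodFrame.write_frame produces: frame type
# (octet, 1 = method), channel id (short), payload size (long), a payload of
# class id (short) + method id (short) + args, and a 0xce frame-end octet.
# The struct-based encoder below is a hypothetical stand-in used only to
# show the byte layout the test is checking.
import struct


def encode_method_frame(channel_id, class_id, method_id, args):
    payload = struct.pack('>HH', class_id, method_id) + args
    return (struct.pack('>BHI', 1, channel_id, len(payload)) +
            payload +
            b'\xce')


frame = encode_method_frame(42, 5, 6, b'hello')
assert frame[0:1] == b'\x01'                       # method frame type
assert frame[-1:] == b'\xce'                       # frame-end marker
assert struct.unpack('>I', frame[3:7])[0] == 9     # class + method + 'hello'
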
def _read_frames(self):
    '''
    Read frames from the socket.
    '''
    # Because of the timer callback to dataRead when we re-buffered, there's
    # a chance that in between we've lost the socket. If that's the case,
    # just silently return, as some code elsewhere would have already
    # notified us. That bug could be fixed by improving the message reading
    # so that we consume all possible messages and ensure that only a
    # partial message was rebuffered, so that we can rely on the next read
    # event to read the subsequent message.
    if self._sock is None:
        return

    data = self._sock.read()
    reader = Reader(data)
    p_channels = set()

    try:
        frames = Frame.read_frames(reader)
    except Frame.FrameError:
        # logger.exception() already records the traceback; with no frames
        # parsed there is nothing further to process.
        self.logger.exception("Framing error")
        return

    for frame in frames:
        if self._debug > 1:
            self.logger.debug("READ: %s", frame)
        self._frames_read += 1
        ch = self.channel(frame.channel_id)
        ch.buffer_frame(frame)
        p_channels.add(ch)

    # Still not clear on what's the best approach here. It seems there's a
    # slight speedup by calling this directly rather than delaying, but the
    # delay allows for pending IO with higher priority to execute.
    self._process_channels(p_channels)
    # event.timeout(0, self._process_channels, p_channels)

    # HACK: read the buffer contents and re-buffer. Would prefer to pass
    # buffer back, but there's no good way of asking the total size of the
    # buffer, comparing to tell(), and then re-buffering. There's also no
    # ability to clear the buffer up to the current position.
    # NOTE: This will be cleared up once eventsocket supports the
    # uber-awesome buffering scheme that will utilize mmap.
    if reader.tell() < len(data):
        self._sock.buffer(data[reader.tell():])

def _read_frames(self):
    '''
    Read frames from the socket.
    '''
    # Because of the timer callback to dataRead when we re-buffered, there's
    # a chance that in between we've lost the socket. If that's the case,
    # just silently return, as some code elsewhere would have already
    # notified us. That bug could be fixed by improving the message reading
    # so that we consume all possible messages and ensure that only a
    # partial message was rebuffered, so that we can rely on the next read
    # event to read the subsequent message.
    if self._sock is None:
        return

    data = self._sock.read()
    reader = Reader(data)
    p_channels = set()

    for frame in Frame.read_frames(reader):
        if self._debug > 1:
            self.logger.debug("READ: %s", frame)
        self._frames_read += 1
        ch = self.channel(frame.channel_id)
        ch.buffer_frame(frame)
        p_channels.add(ch)

    # Still not clear on what's the best approach here. It seems there's a
    # slight speedup by calling this directly rather than delaying, but the
    # delay allows for pending IO with higher priority to execute.
    self._process_channels(p_channels)
    # event.timeout(0, self._process_channels, p_channels)

    # HACK: read the buffer contents and re-buffer. Would prefer to pass
    # buffer back, but there's no good way of asking the total size of the
    # buffer, comparing to tell(), and then re-buffering. There's also no
    # ability to clear the buffer up to the current position.
    # NOTE: This will be cleared up once eventsocket supports the
    # uber-awesome buffering scheme that will utilize mmap.
    if reader.tell() < len(data):
        self._sock.buffer(data[reader.tell():])

def test_tell(self):
    r = Reader('')
    r._pos = 'foo'
    assert_equals('foo', r.tell())

def read_frames(self):
    '''
    Read frames from the transport and process them. Some transports may
    choose to do this in the background, in several threads, and so on.
    '''
    # It's possible in a concurrent environment that our transport handle
    # has gone away, so handle that cleanly.
    # TODO: Consider moving this block into Translator base class. In many
    # ways it belongs there. One of the problems though is that this is
    # essentially the read loop. Each Transport has different rules for
    # how to kick this off, and in the case of gevent, this is how a
    # blocking call to read from the socket is kicked off.
    if self._transport is None:
        return

    # Send a heartbeat (if needed)
    self._channels[0].send_heartbeat()

    data = self._transport.read(self._heartbeat)
    current_time = time.time()
    if data is None:
        # Wait for 2 heartbeat intervals before giving up. See AMQP 4.2.7:
        # "If a peer detects no incoming traffic (i.e. received octets) for
        # two heartbeat intervals or longer, it should close the connection"
        if (self._heartbeat and
                current_time - self._last_octet_time > 2 * self._heartbeat):
            msg = 'Heartbeats not received from %s for %d seconds' % (
                self._host, 2 * self._heartbeat)
            self.transport_closed(msg=msg)
            raise ConnectionClosed('Connection is closed: ' + msg)
        return

    self._last_octet_time = current_time
    reader = Reader(data)
    p_channels = set()

    try:
        for frame in Frame.read_frames(reader):
            if self._debug > 1:
                self.logger.debug("READ: %s", frame)
            self._frames_read += 1
            ch = self.channel(frame.channel_id)
            ch.buffer_frame(frame)
            p_channels.add(ch)
    except Frame.FrameError as e:
        # Frame error in the peer, disconnect
        self.close(reply_code=501,
                   reply_text='frame error from %s : %s' % (
                       self._host, str(e)),
                   class_id=0, method_id=0, disconnect=True)
        raise ConnectionClosed("connection is closed: %s : %s" %
                               (self._close_info['reply_code'],
                                self._close_info['reply_text']))

    # NOTE: we process channels after buffering unused data in order to
    # preserve the integrity of the input stream in case a channel needs to
    # read input, such as when a channel framing error necessitates the use
    # of the synchronous channel.close method. See `Channel.process_frames`.
    #
    # HACK: read the buffer contents and re-buffer. Would prefer to pass
    # buffer back, but there's no good way of asking the total size of the
    # buffer, comparing to tell(), and then re-buffering. There's also no
    # ability to clear the buffer up to the current position. It would be
    # awesome if we could free that memory without a new allocation.
    if reader.tell() < len(data):
        self._transport.buffer(data[reader.tell():])

    self._transport.process_channels(p_channels)

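# Illustrative sketch (not from the library): the dead-peer check above
# implements the AMQP 4.2.7 rule quoted in its comment, i.e. give up only
# after no octets have arrived for two full heartbeat intervals. The helper
# below is a hypothetical extraction of just that predicate.
def heartbeat_expired(last_octet_time, now, heartbeat_interval):
    '''True if the peer should be considered dead per AMQP 4.2.7.'''
    if not heartbeat_interval:        # heartbeats disabled -> never expire
        return False
    return (now - last_octet_time) > 2 * heartbeat_interval


assert not heartbeat_expired(100.0, 100.0, 30)   # fresh traffic
assert not heartbeat_expired(100.0, 155.0, 30)   # 55s < 60s, keep waiting
assert heartbeat_expired(100.0, 161.0, 30)       # > two intervals, give up
assert not heartbeat_expired(100.0, 1000.0, 0)   # heartbeats disabled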