def recv(self):
    """Non-blocking network receive.

    Returns the next available response, or None when no complete
    response is ready or the connection is unusable.

    Side effects: closes the connection (failing any pending request
    futures) if the socket dropped while requests were in flight, or
    if the oldest in-flight request exceeded request_timeout_ms.
    """
    assert not self._processing, 'Recursion not supported'
    # The socket may legitimately be readable mid-auth, so only treat
    # "not connected" as fatal when we are not AUTHENTICATING.
    # (idiom fix: `x is not Y` instead of `not x is Y` -- same semantics)
    if not self.connected() and self.state is not ConnectionStates.AUTHENTICATING:
        log.warning('%s cannot recv: socket not connected', self)
        # If requests are pending, we should close the socket and
        # fail all the pending request futures
        if self.in_flight_requests:
            self.close()
        return None

    elif not self.in_flight_requests:
        log.warning('%s: No in-flight-requests to recv', self)
        return None

    response = self._recv()
    # No data yet: check whether the oldest in-flight request has
    # exceeded its deadline and, if so, fail the connection.
    if not response and self.requests_timed_out():
        log.warning('%s timed out after %s ms. Closing connection.',
                    self, self.config['request_timeout_ms'])
        self.close(error=Errors.RequestTimedOutError(
            'Request timed out after %s ms' %
            self.config['request_timeout_ms']))
        return None
    return response
def recv(self):
    """Non-blocking network receive.

    Returns a list of (response, future) tuples for completed requests,
    or an empty tuple/list when nothing is ready or the connection is
    unusable.

    Side effects: closes the connection (failing pending request
    futures) if the socket dropped with requests in flight, or if the
    oldest in-flight request exceeded request_timeout_ms.
    """
    # The socket may legitimately be readable mid-auth, so only treat
    # "not connected" as fatal when we are not AUTHENTICATING.
    # (idiom fix: `x is not Y` instead of `not x is Y` -- same semantics)
    if not self.connected() and self.state is not ConnectionStates.AUTHENTICATING:
        log.warning('%s cannot recv: socket not connected', self)
        # If requests are pending, we should close the socket and
        # fail all the pending request futures
        if self.in_flight_requests:
            self.close(Errors.ConnectionError('Socket not connected during recv with in-flight-requests'))
        return ()

    elif not self.in_flight_requests:
        log.warning('%s: No in-flight-requests to recv', self)
        return ()

    responses = self._recv()
    # No data yet: check whether the oldest in-flight request has
    # exceeded its deadline and, if so, fail the connection.
    if not responses and self.requests_timed_out():
        log.warning('%s timed out after %s ms. Closing connection.',
                    self, self.config['request_timeout_ms'])
        self.close(error=Errors.RequestTimedOutError(
            'Request timed out after %s ms' %
            self.config['request_timeout_ms']))
        return ()

    # augment responses w/ correlation_id, future, and timestamp
    # (fixed comment typo: "respones")
    for i, response in enumerate(responses):
        (correlation_id, future, timestamp) = self.in_flight_requests.popleft()
        latency_ms = (time.time() - timestamp) * 1000
        if self._sensors:
            self._sensors.request_time.record(latency_ms)

        log.debug('%s Response %d (%s ms): %s', self, correlation_id, latency_ms, response)
        responses[i] = (response, future)

    return responses
def _poll(self, timeout):
    """Returns list of (response, future) tuples"""
    handled_conns = set()

    select_begin = time.time()
    selected = self._selector.select(timeout)
    select_end = time.time()
    if self._sensors:
        # sensor records nanoseconds
        self._sensors.select_time.record(
            (select_end - select_begin) * 1000000000)

    for sel_key, event_mask in selected:
        # wake-fd readability just means someone called wakeup()
        if sel_key.fileobj is self._wake_r:
            self._clear_wake_fd()
            continue
        if not (event_mask & selectors.EVENT_READ):
            continue

        conn = sel_key.data
        handled_conns.add(conn)

        if conn.in_flight_requests:
            self._idle_expiry_manager.update(conn.node_id)
            self._pending_completion.extend(conn.recv())
            continue

        # EVENT_READ with nothing in flight means one of two things:
        #
        # 1. The remote end closed the connection (because it died, or
        #    because a firewall timed out, or whatever)
        # 2. The protocol is out of sync.
        #
        # Either way the connection is no longer safe to use. Peek one
        # byte to tell the cases apart, then close the connection.
        try:
            if sel_key.fileobj.recv(1):
                # anything other than a 0-byte read means protocol issues
                log.warning('Protocol out of sync on %r, closing', conn)
        except socket.error:
            pass
        conn.close(
            Errors.ConnectionError(
                'Socket EVENT_READ without in-flight-requests'))

    # Check for additional pending SSL bytes
    if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
        # TODO: optimize
        for conn in self._conns.values():
            if conn not in handled_conns and conn.connected() \
                    and conn._sock.pending():
                self._pending_completion.extend(conn.recv())

    # Reap any connection whose oldest in-flight request has expired
    for conn in six.itervalues(self._conns):
        if conn.requests_timed_out():
            log.warning('%s timed out after %s ms. Closing connection.',
                        conn, conn.config['request_timeout_ms'])
            conn.close(error=Errors.RequestTimedOutError(
                'Request timed out after %s ms' %
                conn.config['request_timeout_ms']))

    if self._sensors:
        self._sensors.io_time.record(
            (time.time() - select_end) * 1000000000)

    self._maybe_close_oldest_connection()
def recv(self):
    """Non-blocking network receive. Return response if available

    A response is read in two phases, tracked by ``self._receiving``:

      1. ``not self._receiving``: accumulate the 4-byte size prefix
         (decoded via Int32) into ``self._rbuffer``.
      2. ``self._receiving``: accumulate ``self._next_payload_bytes``
         of payload, then decode via ``self._process_response()``.

    Either phase returns None early when the socket has no data yet;
    a disconnect, header/recv error, or request timeout closes the
    connection and returns None.
    """
    assert not self._processing, 'Recursion not supported'
    if not self.connected():
        log.warning('%s cannot recv: socket not connected', self)
        # If requests are pending, we should close the socket and
        # fail all the pending request futures
        if self.in_flight_requests:
            self.close()
        return None
    elif not self.in_flight_requests:
        log.warning('%s: No in-flight-requests to recv', self)
        return None
    elif self._requests_timed_out():
        log.warning('%s timed out after %s ms. Closing connection.',
                    self, self.config['request_timeout_ms'])
        self.close(error=Errors.RequestTimedOutError(
            'Request timed out after %s ms' %
            self.config['request_timeout_ms']))
        return None

    # Not receiving is the state of reading the payload header
    if not self._receiving:
        try:
            # _rbuffer.tell() is how many header bytes we already staged
            bytes_to_read = 4 - self._rbuffer.tell()
            data = self._sock.recv(bytes_to_read)
            # We expect socket.recv to raise an exception if there is not
            # enough data to read the full bytes_to_read
            # but if the socket is disconnected, we will get empty data
            # without an exception raised
            if not data:
                log.error('%s: socket disconnected', self)
                self.close(error=Errors.ConnectionError('socket disconnected'))
                return None
            self._rbuffer.write(data)
        except ssl.SSLWantReadError:
            # SSL layer needs more raw bytes first; try again later
            return None
        except ConnectionError as e:
            # NOTE(review): `ConnectionError` only exists as a builtin on
            # py3; on py2 this name is presumably a project alias carrying
            # an errno attribute -- confirm the module's imports.
            if six.PY2 and e.errno == errno.EWOULDBLOCK:
                return None
            log.exception('%s: Error receiving 4-byte payload header -'
                          ' closing socket', self)
            self.close(error=Errors.ConnectionError(e))
            return None
        except BlockingIOError:
            # py3 raises BlockingIOError for would-block; nothing to do yet.
            # NOTE(review): on py2 this name must come from elsewhere in the
            # file; re-raising there preserves the old behavior -- confirm.
            if six.PY3:
                return None
            raise

        if self._rbuffer.tell() == 4:
            # Full size prefix staged: decode payload length ...
            self._rbuffer.seek(0)
            self._next_payload_bytes = Int32.decode(self._rbuffer)
            # reset buffer and switch state to receiving payload bytes
            self._rbuffer.seek(0)
            self._rbuffer.truncate()
            self._receiving = True
        elif self._rbuffer.tell() > 4:
            # Buffer should never overshoot 4 bytes in phase 1
            raise Errors.KafkaError('this should not happen - are you threading?')

    if self._receiving:
        staged_bytes = self._rbuffer.tell()
        try:
            bytes_to_read = self._next_payload_bytes - staged_bytes
            data = self._sock.recv(bytes_to_read)
            # We expect socket.recv to raise an exception if there is not
            # enough data to read the full bytes_to_read
            # but if the socket is disconnected, we will get empty data
            # without an exception raised
            if not data:
                log.error('%s: socket disconnected', self)
                self.close(error=Errors.ConnectionError('socket disconnected'))
                return None
            self._rbuffer.write(data)
        except ssl.SSLWantReadError:
            return None
        except ConnectionError as e:
            # Extremely small chance that we have exactly 4 bytes for a
            # header, but nothing to read in the body yet
            if six.PY2 and e.errno == errno.EWOULDBLOCK:
                return None
            log.exception('%s: Error in recv', self)
            self.close(error=Errors.ConnectionError(e))
            return None
        except BlockingIOError:
            if six.PY3:
                return None
            raise

        staged_bytes = self._rbuffer.tell()
        if staged_bytes > self._next_payload_bytes:
            # Overshoot: close the connection (falls through to the
            # != check below, which returns None)
            self.close(error=Errors.KafkaError('Receive buffer has more bytes than expected?'))

        if staged_bytes != self._next_payload_bytes:
            # Partial payload; wait for more data
            return None

        # Full payload staged: reset phase state and decode the response
        self._receiving = False
        self._next_payload_bytes = 0
        self._rbuffer.seek(0)
        response = self._process_response(self._rbuffer)
        self._rbuffer.seek(0)
        self._rbuffer.truncate()

        return response
def _poll(self, timeout):
    """Select-loop body: flush writers, read ready sockets, reap timeouts.

    Returns None; completed (response, future) pairs are appended to
    self._pending_completion.
    """
    # This needs to be locked, but since it is only called from within the
    # locked section of poll(), there is no additional lock acquisition here
    handled_conns = set()

    # Send pending requests first, before polling for responses
    self._register_send_sockets()

    select_begin = time.time()
    selected = self._selector.select(timeout)
    select_end = time.time()
    if self._sensors:
        # sensor records nanoseconds
        self._sensors.select_time.record(
            (select_end - select_begin) * 1000000000)

    for sel_key, event_mask in selected:
        # wake-fd readability just means someone called wakeup()
        if sel_key.fileobj is self._wake_r:
            self._clear_wake_fd()
            continue

        # Send pending requests if socket is ready to write
        if event_mask & selectors.EVENT_WRITE:
            conn = sel_key.data
            if conn.connecting():
                conn.connect()
            elif conn.send_pending_requests_v2():
                # If send is complete, we dont need to track write readiness
                # for this socket anymore
                remaining_events = sel_key.events ^ selectors.EVENT_WRITE
                if remaining_events:
                    self._selector.modify(
                        sel_key.fileobj, remaining_events, sel_key.data)
                else:
                    self._selector.unregister(sel_key.fileobj)

        if not (event_mask & selectors.EVENT_READ):
            continue

        conn = sel_key.data
        handled_conns.add(conn)

        if conn.in_flight_requests:
            self._idle_expiry_manager.update(conn.node_id)
            self._pending_completion.extend(conn.recv())
            continue

        # EVENT_READ with nothing in flight means one of two things:
        #
        # 1. The remote end closed the connection (because it died, or
        #    because a firewall timed out, or whatever)
        # 2. The protocol is out of sync.
        #
        # Either way the connection is no longer safe to use. Peek one
        # byte to tell the cases apart, then close the connection.
        try:
            if sel_key.fileobj.recv(1):
                # anything other than a 0-byte read means protocol issues
                log.warning('Protocol out of sync on %r, closing', conn)
        except socket.error:
            pass
        conn.close(
            Errors.KafkaConnectionError(
                'Socket EVENT_READ without in-flight-requests'))

    # Check for additional pending SSL bytes
    if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
        # TODO: optimize
        for conn in self._conns.values():
            if conn not in handled_conns and conn.connected() \
                    and conn._sock.pending():
                self._pending_completion.extend(conn.recv())

    # Reap any connection whose oldest in-flight request has expired
    for conn in six.itervalues(self._conns):
        if conn.requests_timed_out():
            log.warning('%s timed out after %s ms. Closing connection.',
                        conn, conn.config['request_timeout_ms'])
            conn.close(error=Errors.RequestTimedOutError(
                'Request timed out after %s ms' %
                conn.config['request_timeout_ms']))

    if self._sensors:
        self._sensors.io_time.record(
            (time.time() - select_end) * 1000000000)

    self._maybe_close_oldest_connection()