def test_send_without_response(self):
    """Imitate producer without acknowledge, in this case client produces
    messages and kafka does not send response, and we make sure that
    futures do not stuck in queue forever"""
    host, port = self.kafka_host, self.kafka_port
    conn = yield from create_conn(host, port, loop=self.loop)

    # prepare message
    builder = LegacyRecordBatchBuilder(
        magic=1, compression_type=0, batch_size=99999999)
    builder.append(offset=0, value=b"foo", key=None, timestamp=None)
    request = ProduceRequest(
        required_acks=0, timeout=10 * 1000,
        topics=[(b'foo', [(0, bytes(builder.build()))])])

    # produce messages without acknowledge
    req = []
    for i in range(10):
        req.append(conn.send(request, expect_response=False))
    # make sure futures no stuck in queue
    self.assertEqual(len(conn._requests), 0)
    # the returned futures must still resolve even though no response
    # will ever arrive from the broker
    for x in req:
        yield from x
    conn.close()
def test_connections_max_idle_ms(self):
    """Connection must be dropped after max_idle_ms of inactivity, but
    not while requests are flowing or a long read is in progress."""
    host, port = self.kafka_host, self.kafka_port
    conn = yield from create_conn(
        host, port, loop=self.loop, max_idle_ms=200)
    self.assertEqual(conn.connected(), True)
    yield from asyncio.sleep(0.1, loop=self.loop)
    # Do some work
    request = MetadataRequest([])
    yield from conn.send(request)
    yield from asyncio.sleep(0.15, loop=self.loop)
    # Check if we're still connected after 250ms, as we were not idle
    self.assertEqual(conn.connected(), True)

    # It shouldn't break if we have a long running call either
    readexactly = conn._reader.readexactly
    with mock.patch.object(conn._reader, 'readexactly') as mocked:
        @asyncio.coroutine
        def long_read(n):
            # hold the read longer than max_idle_ms, then delegate to
            # the real reader
            yield from asyncio.sleep(0.2, loop=self.loop)
            return (yield from readexactly(n))
        mocked.side_effect = long_read
        yield from conn.send(MetadataRequest([]))
        self.assertEqual(conn.connected(), True)

    # once truly idle past max_idle_ms the connection must be closed
    yield from asyncio.sleep(0.2, loop=self.loop)
    self.assertEqual(conn.connected(), False)
def _get_conn(self, node_id):
    "Get or create a connection to a broker using host and port"
    # Reuse a cached connection if it is still alive; drop dead ones.
    if node_id in self._conns:
        conn = self._conns[node_id]
        if not conn.connected():
            del self._conns[node_id]
        else:
            return conn

    try:
        broker = self.cluster.broker_metadata(node_id)
        assert broker, 'Broker id %s not in current metadata' % node_id

        log.debug("Initiating connection to node %s at %s:%s",
                  node_id, broker.host, broker.port)

        with (yield from self._get_conn_lock):
            # Re-check under the lock: another coroutine may have
            # created the connection while we waited.
            if node_id in self._conns:
                return self._conns[node_id]

            self._conns[node_id] = yield from create_conn(
                broker.host, broker.port, loop=self._loop,
                client_id=self._client_id,
                request_timeout_ms=self._request_timeout_ms)
    except (OSError, asyncio.TimeoutError) as err:
        log.error('Unable connect to node with id %s: %s', node_id, err)
        return None
    else:
        return self._conns[node_id]
def test_send_without_response(self):
    """Imitate producer without acknowledge, in this case client produces
    messages and kafka does not send response, and we make sure that
    futures do not stuck in queue forever"""
    host, port = self.kafka_host, self.kafka_port
    conn = yield from create_conn(host, port, loop=self.loop)

    # prepare message
    msgs = create_message_set([b'foo'], 0, None)
    req = ProduceRequest(b'bar', 0, msgs)

    # acks=0 means the broker will not reply at all
    encoder = functools.partial(
        KafkaProtocol.encode_produce_request,
        acks=0, timeout=int(10*1000))

    request_id = 1
    client_id = b"aiokafka-python"
    request = encoder(client_id=client_id,
                      correlation_id=request_id,
                      payloads=[req])

    # produce messages without acknowledge
    for i in range(100):
        conn.send(request, no_ack=True)
    # make sure futures no stuck in queue
    self.assertEqual(len(conn._requests), 0)
def _get_conn(self, node_id, *, group=ConnectionGroup.DEFAULT,
              no_hint=False):
    "Get or create a connection to a broker using host and port"
    conn_id = (node_id, group)
    # Reuse a cached connection if it is still alive; drop dead ones.
    if conn_id in self._conns:
        conn = self._conns[conn_id]
        if not conn.connected():
            del self._conns[conn_id]
        else:
            return conn

    try:
        if group == ConnectionGroup.DEFAULT:
            broker = self.cluster.broker_metadata(node_id)
            # XXX: earlier we only did an assert here, but it seems it's
            # possible to get a leader that is for some reason not in
            # metadata.
            # I think requerying metadata should solve this problem
            if broker is None:
                raise StaleMetadata(
                    'Broker id %s not in current metadata' % node_id)
        else:
            # Coordinator lookups use a separate metadata table.
            broker = self.cluster.coordinator_metadata(node_id)
            assert broker is not None

        log.debug("Initiating connection to node %s at %s:%s",
                  node_id, broker.host, broker.port)

        with (yield from self._get_conn_lock):
            # Re-check under the lock: another coroutine may have
            # created the connection while we waited.
            if conn_id in self._conns:
                return self._conns[conn_id]

            # Pass the configured api version to the connection unless
            # it still has to be auto-detected (or hints are disabled).
            version_hint = self._api_version
            if version_hint == "auto" or no_hint:
                version_hint = None

            self._conns[conn_id] = yield from create_conn(
                broker.host, broker.port, loop=self._loop,
                client_id=self._client_id,
                request_timeout_ms=self._request_timeout_ms,
                ssl_context=self._ssl_context,
                security_protocol=self._security_protocol,
                on_close=self._on_connection_closed,
                max_idle_ms=self._connections_max_idle_ms,
                sasl_mechanism=self._sasl_mechanism,
                sasl_plain_username=self._sasl_plain_username,
                sasl_plain_password=self._sasl_plain_password,
                sasl_kerberos_service_name=self._sasl_kerberos_service_name,  # noqa: ignore=E501
                sasl_kerberos_domain_name=self._sasl_kerberos_domain_name,
                version_hint=version_hint)
    except (OSError, asyncio.TimeoutError) as err:
        log.error('Unable connect to node with id %s: %s', node_id, err)
        if group == ConnectionGroup.DEFAULT:
            # Connection failures imply that our metadata is stale, so
            # let's refresh
            self.force_metadata_update()
        return None
    else:
        return self._conns[conn_id]
def test_global_loop_for_create_conn(self):
    """create_conn without an explicit loop must pick up the global one."""
    asyncio.set_event_loop(self.loop)
    connection = yield from create_conn(self.kafka_host, self.kafka_port)
    self.assertIs(connection._loop, self.loop)
    connection.close()
    # a second close must be a no-op and gives full coverage
    # of the *if self._reader:* condition
    connection.close()
def test_basic_connection_load_meta(self):
    """A freshly created connection can serve a metadata request."""
    connection = yield from create_conn(
        self.kafka_host, self.kafka_port, loop=self.loop)
    self.assertEqual(connection.connected(), True)
    response = yield from connection.send(MetadataRequest([]))
    connection.close()
    self.assertIsInstance(response, MetadataResponse)
def test_basic_connection_load_meta(self):
    """A freshly created connection can serve a metadata request."""
    connection = yield from create_conn(
        self.kafka_host, self.kafka_port, loop=self.loop)
    self.assertEqual(connection.connected(), True)
    response = yield from connection.send(MetadataRequest([]))
    connection.close()
    self.assertIsInstance(response, MetadataResponse)
def test_global_loop_for_create_conn(self):
    """create_conn without an explicit loop must pick up the global one."""
    asyncio.set_event_loop(self.loop)
    connection = yield from create_conn(self.kafka_host, self.kafka_port)
    self.assertIs(connection._loop, self.loop)
    connection.close()
    # a second close must be a no-op and gives full coverage
    # of the *if self._reader:* condition
    connection.close()
def test_conn_warn_unclosed(self):
    """An open connection that gets garbage collected must warn."""
    connection = yield from create_conn(
        self.kafka_host, self.kafka_port,
        loop=self.loop, max_idle_ms=100000)
    with self.silence_loop_exception_handler():
        with self.assertWarnsRegex(ResourceWarning,
                                   "Unclosed AIOKafkaConnection"):
            del connection
            gc.collect()
def bootstrap(self): """Try to to bootstrap initial cluster metadata""" # using request v0 for bootstap (bcs api version is not detected yet) metadata_request = MetadataRequest[0]([]) for host, port, _ in self.hosts: log.debug("Attempting to bootstrap via node at %s:%s", host, port) try: bootstrap_conn = yield from create_conn( host, port, loop=self._loop, client_id=self._client_id, request_timeout_ms=self._request_timeout_ms, ssl_context=self._ssl_context, security_protocol=self._security_protocol, max_idle_ms=self._connections_max_idle_ms) except (OSError, asyncio.TimeoutError) as err: log.error('Unable connect to "%s:%s": %s', host, port, err) continue try: metadata = yield from bootstrap_conn.send(metadata_request) except KafkaError as err: log.warning('Unable to request metadata from "%s:%s": %s', host, port, err) bootstrap_conn.close() continue self.cluster.update_metadata(metadata) # A cluster with no topics can return no broker metadata... # In that case, we should keep the bootstrap connection till # we get a normal cluster layout. if not len(self.cluster.brokers()): self._conns['bootstrap'] = bootstrap_conn else: bootstrap_conn.close() log.debug('Received cluster metadata: %s', self.cluster) break else: raise ConnectionError('Unable to bootstrap from {}'.format( self.hosts)) # detect api version if need if self._api_version == 'auto': self._api_version = yield from self.check_version() if type(self._api_version) is not tuple: self._api_version = tuple(map(int, self._api_version.split('.'))) if self._sync_task is None: # starting metadata synchronizer task self._sync_task = ensure_future(self._md_synchronizer(), loop=self._loop)
def test_conn_warn_unclosed(self):
    """An open connection that gets garbage collected must warn."""
    connection = yield from create_conn(
        self.kafka_host, self.kafka_port,
        loop=self.loop, max_idle_ms=100000)
    with self.silence_loop_exception_handler():
        with self.assertWarnsRegex(ResourceWarning,
                                   "Unclosed AIOKafkaConnection"):
            del connection
            gc.collect()
def _get_conn(self, node_id, *, group=ConnectionGroup.DEFAULT):
    "Get or create a connection to a broker using host and port"
    conn_id = (node_id, group)
    # Reuse a cached connection if it is still alive; drop dead ones.
    if conn_id in self._conns:
        conn = self._conns[conn_id]
        if not conn.connected():
            del self._conns[conn_id]
        else:
            return conn

    try:
        if group == ConnectionGroup.DEFAULT:
            broker = self.cluster.broker_metadata(node_id)
            # XXX: earlier we only did an assert here, but it seems it's
            # possible to get a leader that is for some reason not in
            # metadata.
            # I think requerying metadata should solve this problem
            if broker is None:
                raise StaleMetadata(
                    'Broker id %s not in current metadata' % node_id)
        else:
            # Coordinator lookups use a separate metadata table.
            broker = self.cluster.coordinator_metadata(node_id)
            assert broker is not None

        log.debug("Initiating connection to node %s at %s:%s",
                  node_id, broker.host, broker.port)

        with (yield from self._get_conn_lock):
            # Re-check under the lock: another coroutine may have
            # created the connection while we waited.
            if conn_id in self._conns:
                return self._conns[conn_id]

            self._conns[conn_id] = yield from create_conn(
                broker.host, broker.port, loop=self._loop,
                client_id=self._client_id,
                request_timeout_ms=self._request_timeout_ms,
                ssl_context=self._ssl_context,
                security_protocol=self._security_protocol,
                on_close=self._on_connection_closed,
                max_idle_ms=self._connections_max_idle_ms)
    except (OSError, asyncio.TimeoutError) as err:
        log.error('Unable connect to node with id %s: %s', node_id, err)
        if group == ConnectionGroup.DEFAULT:
            # Connection failures imply that our metadata is stale, so
            # let's refresh
            self.force_metadata_update()
        return None
    else:
        return self._conns[conn_id]
def test_basic_connection_load_meta(self):
    """Encode a metadata request by hand and decode the raw response."""
    connection = yield from create_conn(
        self.server.host, self.server.port, loop=self.loop)
    request = KafkaProtocol.encode_metadata_request(
        client_id=b"aiokafka-python", correlation_id=1, payloads=())
    raw_response = yield from connection.send(request)
    response = KafkaProtocol.decode_metadata_response(raw_response)
    connection.close()
    self.assertIsInstance(response, MetadataResponse)
def test_basic_connection_load_meta(self):
    """Encode a metadata request by hand and decode the raw response."""
    connection = yield from create_conn(
        self.server.host, self.server.port, loop=self.loop)
    request = KafkaProtocol.encode_metadata_request(
        client_id=b"aiokafka-python", correlation_id=1, payloads=())
    raw_response = yield from connection.send(request)
    response = KafkaProtocol.decode_metadata_response(raw_response)
    connection.close()
    self.assertIsInstance(response, MetadataResponse)
def test_send_without_response(self):
    """Imitate a producer running without acknowledges: the client keeps
    sending messages, kafka never answers, and the futures must not pile
    up in the request queue."""
    connection = yield from create_conn(
        self.kafka_host, self.kafka_port, loop=self.loop)
    # a single message on partition 0 of topic b'foo'
    payload = Message(b'foo')
    request = ProduceRequest(
        required_acks=0, timeout=10 * 1000,
        topics=[(b'foo', [(0, [(0, 0, payload)])])])
    # fire-and-forget: no response is expected for any of these
    for _ in range(100):
        connection.send(request, expect_response=False)
    # the internal request queue must stay empty
    self.assertEqual(len(connection._requests), 0)
def test_pending_futures(self):
    """Closing the connection cancels every request still in flight."""
    connection = yield from create_conn(
        self.server.host, self.server.port, loop=self.loop)
    request = KafkaProtocol.encode_metadata_request(
        client_id=b"aiokafka-python", correlation_id=1, payloads=())
    pending = [connection.send(request) for _ in range(3)]
    connection.close()
    for fut in pending:
        self.assertTrue(fut.cancelled())
def bootstrap(self):
    """Try to bootstrap initial cluster metadata"""
    metadata_request = MetadataRequest([])
    for host, port, _ in self.hosts:
        log.debug("Attempting to bootstrap via node at %s:%s", host, port)
        try:
            bootstrap_conn = yield from create_conn(
                host, port, loop=self._loop, client_id=self._client_id,
                request_timeout_ms=self._request_timeout_ms)
        except (OSError, asyncio.TimeoutError) as err:
            log.error('Unable connect to "%s:%s": %s', host, port, err)
            continue

        try:
            metadata = yield from bootstrap_conn.send(metadata_request)
        except KafkaError as err:
            log.warning('Unable to request metadata from "%s:%s": %s',
                        host, port, err)
            bootstrap_conn.close()
            continue

        self.cluster.update_metadata(metadata)

        # A cluster with no topics can return no broker metadata
        # in that case, we should keep the bootstrap connection
        if not len(self.cluster.brokers()):
            self._conns['bootstrap'] = bootstrap_conn
        else:
            bootstrap_conn.close()

        log.debug('Received cluster metadata: %s', self.cluster)
        break
    else:
        # every host failed — bootstrap is impossible
        raise ConnectionError('Unable to bootstrap from {}'.format(
            self.hosts))

    if self._sync_task is None:
        # starting metadata synchronizer task
        self._sync_task = ensure_future(self._md_synchronizer(),
                                        loop=self._loop)
def test_send_without_response(self):
    """Imitate a producer running without acknowledges: the client keeps
    sending messages, kafka never answers, and the futures must not pile
    up in the request queue."""
    connection = yield from create_conn(
        self.kafka_host, self.kafka_port, loop=self.loop)
    # a single message on partition 0 of topic b'foo'
    payload = Message(b'foo')
    request = ProduceRequest(
        required_acks=0, timeout=10 * 1000,
        topics=[(b'foo', [(0, [(0, 0, payload)])])])
    # fire-and-forget: no response is expected for any of these
    for _ in range(100):
        connection.send(request, expect_response=False)
    # the internal request queue must stay empty
    self.assertEqual(len(connection._requests), 0)
def test_pending_futures(self):
    """Closing the connection cancels every request still in flight."""
    connection = yield from create_conn(
        self.server.host, self.server.port, loop=self.loop)
    request = KafkaProtocol.encode_metadata_request(
        client_id=b"aiokafka-python", correlation_id=1, payloads=())
    pending = [connection.send(request) for _ in range(3)]
    connection.close()
    for fut in pending:
        self.assertTrue(fut.cancelled())
def bootstrap(self):
    """Try to bootstrap initial cluster metadata"""
    metadata_request = MetadataRequest([])
    for host, port, _ in self.hosts:
        log.debug("Attempting to bootstrap via node at %s:%s", host, port)
        try:
            bootstrap_conn = yield from create_conn(
                host, port, loop=self._loop, client_id=self._client_id,
                request_timeout_ms=self._request_timeout_ms)
        except (OSError, asyncio.TimeoutError) as err:
            log.error('Unable connect to "%s:%s": %s', host, port, err)
            continue

        try:
            metadata = yield from bootstrap_conn.send(metadata_request)
        except KafkaError as err:
            log.warning('Unable to request metadata from "%s:%s": %s',
                        host, port, err)
            bootstrap_conn.close()
            continue

        self.cluster.update_metadata(metadata)

        # A cluster with no topics can return no broker metadata
        # in that case, we should keep the bootstrap connection
        if not len(self.cluster.brokers()):
            self._conns['bootstrap'] = bootstrap_conn
        else:
            bootstrap_conn.close()

        log.debug('Received cluster metadata: %s', self.cluster)
        break
    else:
        # every host failed — bootstrap is impossible
        raise ConnectionError(
            'Unable to bootstrap from {}'.format(self.hosts))

    if self._sync_task is None:
        # starting metadata synchronizer task
        self._sync_task = ensure_future(
            self._md_synchronizer(), loop=self._loop)
def test_send_cancelled(self):
    """Cancelling one in-flight request must not break the connection."""
    host, port = self.server.host, self.server.port
    conn = yield from create_conn(host, port, loop=self.loop)

    encoder = KafkaProtocol.encode_metadata_request
    decoder = KafkaProtocol.decode_metadata_response

    request_id = 1
    client_id = b"aiokafka-python"
    payloads = ()
    request = encoder(client_id=client_id, correlation_id=request_id,
                      payloads=payloads)

    fut = conn.send(request)
    fut.cancel()
    # BUG FIX: the sleep coroutine was created but never awaited
    # (missing ``yield from``), so the test did not actually wait
    # before checking the future's state
    yield from asyncio.sleep(0.1, loop=self.loop)
    self.assertTrue(fut.cancelled())

    # make sure that connections still working
    raw_response = yield from conn.send(request)
    response = decoder(raw_response)
    self.assertIsInstance(response, MetadataResponse)
    conn.close()
def test_send_cancelled(self):
    """Cancelling one in-flight request must not break the connection."""
    host, port = self.server.host, self.server.port
    conn = yield from create_conn(host, port, loop=self.loop)

    encoder = KafkaProtocol.encode_metadata_request
    decoder = KafkaProtocol.decode_metadata_response

    request_id = 1
    client_id = b"aiokafka-python"
    payloads = ()
    request = encoder(client_id=client_id, correlation_id=request_id,
                      payloads=payloads)

    fut = conn.send(request)
    fut.cancel()
    # BUG FIX: the sleep coroutine was created but never awaited
    # (missing ``yield from``), so the test did not actually wait
    # before checking the future's state
    yield from asyncio.sleep(0.1, loop=self.loop)
    self.assertTrue(fut.cancelled())

    # make sure that connections still working
    raw_response = yield from conn.send(request)
    response = decoder(raw_response)
    self.assertIsInstance(response, MetadataResponse)
    conn.close()
def _get_conn(self, node_id, *, group=ConnectionGroup.DEFAULT):
    "Get or create a connection to a broker using host and port"
    conn_id = (node_id, group)
    # Reuse a cached connection if it is still alive; drop dead ones.
    if conn_id in self._conns:
        conn = self._conns[conn_id]
        if not conn.connected():
            del self._conns[conn_id]
        else:
            return conn

    try:
        broker = self.cluster.broker_metadata(node_id)
        assert broker, 'Broker id %s not in current metadata' % node_id

        log.debug("Initiating connection to node %s at %s:%s",
                  node_id, broker.host, broker.port)

        with (yield from self._get_conn_lock):
            # Re-check under the lock: another coroutine may have
            # created the connection while we waited.
            if conn_id in self._conns:
                return self._conns[conn_id]

            self._conns[conn_id] = yield from create_conn(
                broker.host, broker.port, loop=self._loop,
                client_id=self._client_id,
                request_timeout_ms=self._request_timeout_ms,
                ssl_context=self._ssl_context,
                security_protocol=self._security_protocol,
                on_close=self._on_connection_closed,
                max_idle_ms=self._connections_max_idle_ms)
    except (OSError, asyncio.TimeoutError) as err:
        log.error('Unable connect to node with id %s: %s', node_id, err)
        # Connection failures imply that our metadata is stale, so let's
        # refresh
        self.force_metadata_update()
        return None
    else:
        return self._conns[conn_id]
def test_send_without_response(self):
    """Imitate a producer running without acknowledges: the client keeps
    sending messages, kafka never answers, and the futures must not pile
    up in the request queue."""
    connection = yield from create_conn(
        self.kafka_host, self.kafka_port, loop=self.loop)

    # build a single-message legacy record batch
    batch = LegacyRecordBatchBuilder(
        magic=1, compression_type=0, batch_size=99999999)
    batch.append(offset=0, value=b"foo", key=None, timestamp=None)
    request = ProduceRequest(
        required_acks=0, timeout=10 * 1000,
        topics=[(b'foo', [(0, bytes(batch.build()))])])

    # fire-and-forget: no response is expected for any of these
    for _ in range(100):
        connection.send(request, expect_response=False)
    # the internal request queue must stay empty
    self.assertEqual(len(connection._requests), 0)
    connection.close()
def bootstrap(self):
    """Try to bootstrap initial cluster metadata"""
    # using request v0 for bootstrap if not sure v1 is available
    if self._api_version == "auto" or self._api_version < (0, 10):
        metadata_request = MetadataRequest[0]([])
    else:
        metadata_request = MetadataRequest[1]([])

    # only hint the broker api version when it was configured explicitly
    version_hint = None
    if self._api_version != "auto":
        version_hint = self._api_version

    for host, port, _ in self.hosts:
        log.debug("Attempting to bootstrap via node at %s:%s", host, port)
        try:
            bootstrap_conn = yield from create_conn(
                host, port, loop=self._loop, client_id=self._client_id,
                request_timeout_ms=self._request_timeout_ms,
                ssl_context=self._ssl_context,
                security_protocol=self._security_protocol,
                max_idle_ms=self._connections_max_idle_ms,
                sasl_mechanism=self._sasl_mechanism,
                sasl_plain_username=self._sasl_plain_username,
                sasl_plain_password=self._sasl_plain_password,
                sasl_kerberos_service_name=self._sasl_kerberos_service_name,  # noqa: ignore=E501
                sasl_kerberos_domain_name=self._sasl_kerberos_domain_name,
                version_hint=version_hint)
        except (OSError, asyncio.TimeoutError) as err:
            log.error('Unable connect to "%s:%s": %s', host, port, err)
            continue

        try:
            metadata = yield from bootstrap_conn.send(metadata_request)
        except KafkaError as err:
            log.warning('Unable to request metadata from "%s:%s": %s',
                        host, port, err)
            bootstrap_conn.close()
            continue

        self.cluster.update_metadata(metadata)

        # A cluster with no topics can return no broker metadata...
        # In that case, we should keep the bootstrap connection till
        # we get a normal cluster layout.
        if not len(self.cluster.brokers()):
            bootstrap_id = ('bootstrap', ConnectionGroup.DEFAULT)
            self._conns[bootstrap_id] = bootstrap_conn
        else:
            bootstrap_conn.close()

        log.debug('Received cluster metadata: %s', self.cluster)
        break
    else:
        raise ConnectionError('Unable to bootstrap from {}'.format(
            self.hosts))

    # detect api version if need
    if self._api_version == 'auto':
        self._api_version = yield from self.check_version()

    if self._sync_task is None:
        # starting metadata synchronizer task
        self._sync_task = ensure_future(self._md_synchronizer(),
                                        loop=self._loop)
def test_close_disconnects_connection(self):
    """close() must immediately mark the connection as disconnected."""
    connection = yield from create_conn(
        self.kafka_host, self.kafka_port, loop=self.loop)
    self.assertTrue(connection.connected())
    connection.close()
    self.assertFalse(connection.connected())
def test_close_disconnects_connection(self):
    """close() must immediately mark the connection as disconnected."""
    connection = yield from create_conn(
        self.kafka_host, self.kafka_port, loop=self.loop)
    self.assertTrue(connection.connected())
    connection.close()
    self.assertFalse(connection.connected())
def bootstrap(self):
    """Try to bootstrap initial cluster metadata"""
    # using request v0 for bootstrap if not sure v1 is available
    if self._api_version == "auto" or self._api_version < (0, 10):
        metadata_request = MetadataRequest[0]([])
    else:
        metadata_request = MetadataRequest[1]([])

    # only hint the broker api version when it was configured explicitly
    version_hint = None
    if self._api_version != "auto":
        version_hint = self._api_version

    for host, port, _ in self.hosts:
        log.debug("Attempting to bootstrap via node at %s:%s", host, port)
        try:
            bootstrap_conn = yield from create_conn(
                host, port, loop=self._loop, client_id=self._client_id,
                request_timeout_ms=self._request_timeout_ms,
                ssl_context=self._ssl_context,
                security_protocol=self._security_protocol,
                max_idle_ms=self._connections_max_idle_ms,
                sasl_mechanism=self._sasl_mechanism,
                sasl_plain_username=self._sasl_plain_username,
                sasl_plain_password=self._sasl_plain_password,
                sasl_kerberos_service_name=self._sasl_kerberos_service_name,  # noqa: ignore=E501
                sasl_kerberos_domain_name=self._sasl_kerberos_domain_name,
                version_hint=version_hint)
        except (OSError, asyncio.TimeoutError) as err:
            log.error('Unable connect to "%s:%s": %s', host, port, err)
            continue

        try:
            metadata = yield from bootstrap_conn.send(metadata_request)
        except KafkaError as err:
            log.warning('Unable to request metadata from "%s:%s": %s',
                        host, port, err)
            bootstrap_conn.close()
            continue

        self.cluster.update_metadata(metadata)

        # A cluster with no topics can return no broker metadata...
        # In that case, we should keep the bootstrap connection till
        # we get a normal cluster layout.
        if not len(self.cluster.brokers()):
            bootstrap_id = ('bootstrap', ConnectionGroup.DEFAULT)
            self._conns[bootstrap_id] = bootstrap_conn
        else:
            bootstrap_conn.close()

        log.debug('Received cluster metadata: %s', self.cluster)
        break
    else:
        raise ConnectionError(
            'Unable to bootstrap from {}'.format(self.hosts))

    # detect api version if need
    if self._api_version == 'auto':
        self._api_version = yield from self.check_version()

    if self._sync_task is None:
        # starting metadata synchronizer task
        self._sync_task = ensure_future(
            self._md_synchronizer(), loop=self._loop)