def test_any_method_after_close_throws_exception(): """ Calling any consumer method after close should thorw a RuntimeError """ c = Consumer({'group.id': 'test', 'enable.auto.commit': True, 'enable.auto.offset.store': False, 'socket.timeout.ms': 50, 'session.timeout.ms': 100}) c.subscribe(["test"]) c.unsubscribe() c.close() with pytest.raises(RuntimeError) as ex: c.subscribe(['test']) assert ex.match('Consumer closed') with pytest.raises(RuntimeError) as ex: c.unsubscribe() assert ex.match('Consumer closed') with pytest.raises(RuntimeError) as ex: c.poll() assert ex.match('Consumer closed') with pytest.raises(RuntimeError) as ex: c.consume() assert ex.match('Consumer closed') with pytest.raises(RuntimeError) as ex: c.assign([TopicPartition('test', 0)]) assert ex.match('Consumer closed') with pytest.raises(RuntimeError) as ex: c.unassign() assert ex.match('Consumer closed') with pytest.raises(RuntimeError) as ex: c.assignment() assert ex.match('Consumer closed') with pytest.raises(RuntimeError) as ex: c.commit() assert ex.match('Consumer closed') with pytest.raises(RuntimeError) as ex: c.committed([TopicPartition("test", 0)]) assert ex.match('Consumer closed') with pytest.raises(RuntimeError) as ex: c.position([TopicPartition("test", 0)]) assert ex.match('Consumer closed') with pytest.raises(RuntimeError) as ex: c.seek([TopicPartition("test", 0, 0)]) assert ex.match('Consumer closed') with pytest.raises(RuntimeError) as ex: lo, hi = c.get_watermark_offsets(TopicPartition("test", 0)) assert ex.match('Consumer closed')
def test_any_method_after_close_throws_exception(): """ Calling any consumer method after close should thorw a RuntimeError """ c = Consumer({'group.id': 'test', 'enable.auto.commit': True, 'enable.auto.offset.store': False, 'socket.timeout.ms': 50, 'session.timeout.ms': 100}) c.subscribe(["test"]) c.unsubscribe() c.close() with pytest.raises(RuntimeError) as ex: c.subscribe(['test']) assert 'Consumer closed' == str(ex.value) with pytest.raises(RuntimeError) as ex: c.unsubscribe() assert 'Consumer closed' == str(ex.value) with pytest.raises(RuntimeError) as ex: c.poll() assert 'Consumer closed' == str(ex.value) with pytest.raises(RuntimeError) as ex: c.consume() assert 'Consumer closed' == str(ex.value) with pytest.raises(RuntimeError) as ex: c.assign([TopicPartition('test', 0)]) assert 'Consumer closed' == str(ex.value) with pytest.raises(RuntimeError) as ex: c.unassign() assert 'Consumer closed' == str(ex.value) with pytest.raises(RuntimeError) as ex: c.assignment() assert 'Consumer closed' == str(ex.value) with pytest.raises(RuntimeError) as ex: c.commit() assert 'Consumer closed' == str(ex.value) with pytest.raises(RuntimeError) as ex: c.committed([TopicPartition("test", 0)]) assert 'Consumer closed' == str(ex.value) with pytest.raises(RuntimeError) as ex: c.position([TopicPartition("test", 0)]) assert 'Consumer closed' == str(ex.value) with pytest.raises(RuntimeError) as ex: c.seek([TopicPartition("test", 0, 0)]) assert 'Consumer closed' == str(ex.value) with pytest.raises(RuntimeError) as ex: lo, hi = c.get_watermark_offsets(TopicPartition("test", 0)) assert 'Consumer closed' == str(ex.value)
def kafka_consumer(self):
    props = {
        'bootstrap.servers': self.ip_port,  # Where is the Kafka cluster? (replace with the cluster to connect to)
        'group.id': 'goodgo',
        'auto.offset.reset': 'earliest',    # start from the earliest offset
    }
    # Step 2. Create a Kafka Consumer instance
    consumer = Consumer(props)
    # Step 3. Specify the topic name to subscribe to
    topicName = 'LongMoonTest'
    # Step 4. Have the Consumer subscribe to the topic on the Kafka cluster
    consumer.subscribe([topicName], on_assign=self.print_assignment, on_revoke=self.print_revoke)
    count = 0  # record count
    while True:
        records = consumer.consume(num_messages=500, timeout=1.0)  # batch read
        if len(records) > 0:
            count += 1
        for record in records:
            topic = record.topic()
            partition = record.partition()
            offset = record.offset()
            # Extract msgKey and msgValue
            msgKey = self.try_decode_utf8(record.key())
            msgValue = self.try_decode_utf8(record.value())
            print('%s-%d-%d : (%s , %s)' % (topic, partition, offset, msgKey, msgValue))
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(("8.8.8.8", 80))
            ip_port = "http://" + s.getsockname()[0] + ":9200"
            savedata_els(http_ip_port=ip_port, msgValue=msgValue)
            s.close()
def _consume(group_id, topic, n, max_messages):
    config = {
        "bootstrap.servers": "localhost:9094",
        "group.id": group_id,
        "auto.offset.reset": "beginning",
        "enable.partition.eof": "true",
        "enable.auto.commit": "false",
    }
    consumer = Consumer(config)
    consumer.subscribe(topics=[topic])
    messages = 0
    while True:
        if messages == max_messages:
            return
        msg = consumer.consume(num_messages=n, timeout=5)
        if len(msg) == 0:
            continue
        for m in msg:
            if m.error():
                if m.error().code() == KafkaError._PARTITION_EOF:
                    return
                elif m.error():
                    raise KafkaException(m.error())
            else:
                messages += 1
                if messages == max_messages:
                    break
        consumer.commit(asynchronous=False)
class kafkawrapper(object):
    def __init__(self, bootstrap_server):
        self._bootstrap_server = bootstrap_server

    def _connect(self, topic):
        self._topic = topic
        self._consumer = Consumer({
            "bootstrap.servers": self._bootstrap_server,
            "group.id": "some_test_group",
            "enable.auto.commit": False,
            "on_commit": self._log_on_commit,
            "auto.offset.reset": "beginning"
        })

    def eq(self, nor):
        self._consumer.subscribe([self._topic])
        messages = []
        while self._status:
            record = None
            records = self._consumer.consume(nor, 10.0)
            if len(records) < nor:
                self._status = False
            for r in records:
                record = self.process_message(r)
                messages.append(record)
        return messages

    def process_message(self, m=None):
        if m is None:
            self._status = False
            return None
        record = [m.offset(), m.key(), m.value().decode("utf-8")]
        return record
def _consume(self): """Polls for a message. Returns 1 if a message was received, 0 otherwise""" # # # TODO: Poll Kafka for messages. Make sure to handle any errors or exceptions. # Additionally, make sure you return 1 when a message is processed, and 0 when no message # is retrieved. # # c = Consumer(self.broker_properties) c.subscribe(self.topic_name_pattern) while True: messages = c.consume(10, timeout=1.0) for message in messages: if message is None: print('no message received by consumer') elif message.error() is not None: print(f"error from consumer{message.error()} ") else: print( f"consumed message {mesage.key()}: {message.value()}") return 1 logger.info("_consume is incomplete - skipping") return 0
def get_latest_applied(client_options, topic_name, read_timeout=1.0):
    client_options.update({
        'auto.offset.reset': 'latest',
        'enable.auto.commit': False,
    })
    c = Consumer(client_options)

    partition = TopicPartition(topic_name, 0)
    low, high = c.get_watermark_offsets(partition)
    if low is not None and high is not None and high > 0:
        last_msg_offset = high - 1
    else:
        last_msg_offset = 0

    partition = TopicPartition(topic_name, 0, last_msg_offset)
    c.assign([partition])

    read = None
    msg = c.consume(num_messages=1, timeout=read_timeout)
    if msg:
        read = msg[0].value().decode('utf-8')
        # print('Read: {}'.format(read))
    c.close()
    return read
async def consume(topic_name): """Consumes data from the Kafka Topic""" c = Consumer({"bootstrap.servers": BROKER_URL, "group.id": "0"}) c.subscribe([topic_name]) while True: # # TODO: Write a loop that uses consume to grab 5 messages at a time and has a timeout. # See: https://docs.confluent.io/current/clients/confluent-kafka-python/index.html?highlight=partition#confluent_kafka.Consumer.consume # # TODO: Print something to indicate how many messages you've consumed. Print the key and value of # any message(s) you consumed # Do not delete this! messages = c.consume(5, 1.0) for message in messages: if not message: print("No message recieved by consumer") elif message.error(): print(f"error from consumer{message.error}") else: print(f"consumed message {message.key()}: {message.value()}") await asyncio.sleep(0.01)
class KafkaConnector:
    def __init__(self):
        ''' auto.offset.reset only applies when a valid offset can't be found.
        If your consumer group is able to recover committed offsets within a valid
        offset range, the reset policy is not enacted. So change the group.id if you
        want to reprocess. '''
        self.kafka_config = {
            'bootstrap.servers': os.getenv('BROKER_URL'),
            'group.id': uuid.uuid1(),
            'auto.offset.reset': 'earliest',
            'default.topic.config': {'auto.offset.reset': 'smallest'}
        }
        self.consumer = Consumer(self.kafka_config)
        self.consumer.subscribe(['^cash.*'])
        print('topic subscribed')

    def parse_message(self, messages):
        if len(messages):
            if messages[0].error():
                raise messages[0].error()
            # Parse the message to a dictionary
            payload = [json.loads(m.value().decode('ascii')) for m in messages]
            return payload
        return None

    def consume(self, num_messages, timeout):
        messages = self.consumer.consume(
            num_messages=num_messages,
            timeout=timeout
        )
        return messages
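The docstring above is worth acting on: auto.offset.reset only applies when the group has no valid committed offset, so the usual way to re-read a topic from the start is to use a fresh group.id. Below is a minimal sketch of that pattern; the helper name, the localhost:9092 broker address, and the caller-supplied topic are assumptions for illustration, not part of any example above.

import uuid
from confluent_kafka import Consumer

def replay_from_beginning(topic, bootstrap='localhost:9092'):
    # Hypothetical helper: a brand-new group.id has no committed offsets,
    # so 'auto.offset.reset': 'earliest' takes effect and the topic is re-read.
    consumer = Consumer({
        'bootstrap.servers': bootstrap,
        'group.id': 'replay-' + str(uuid.uuid4()),  # fresh group => no stored offsets
        'auto.offset.reset': 'earliest',
    })
    consumer.subscribe([topic])
    try:
        while True:
            msgs = consumer.consume(num_messages=100, timeout=1.0)
            if not msgs:
                break  # nothing more arrived within the timeout
            for m in msgs:
                if m.error() is None:
                    print(m.offset(), m.value())
    finally:
        consumer.close()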
class ConsumerServer:
    BROKER_URL = "PLAINTEXT://localhost:9092"

    def __init__(self, topic_name):
        self.consumer = Consumer({
            "bootstrap.servers": "PLAINTEXT://localhost:9092",
            "group.id": "0"
        })
        self.consumer.subscribe([topic_name], on_assign=self.on_assign)

    def run(self):
        while True:
            messages = self.consumer.consume(10, timeout=1)
            print(f"Received {len(messages)} messages")
            for m in messages:
                if not m:
                    continue
                elif m.error():
                    print(f"Error: {m.error()}")
                else:
                    print(m.key(), m.value())

    def on_assign(self, consumer, partitions):
        """Callback for when topic assignment takes place"""
        for partition in partitions:
            partition.offset = OFFSET_BEGINNING
        consumer.assign(partitions)
def Consume(serverXport, topic):
    c = Consumer({
        'bootstrap.servers': serverXport,
        'group.id': 'python',
        'group.instance.id': platform.node(),
        'client.id': platform.node(),
        'auto.offset.reset': 'earliest',
    })
    c.subscribe([topic])
    try:
        print(
            f'debug: Try to retrieve up to 5 messages from {topic} @ {serverXport}'
        )
        msgs = c.consume(num_messages=5, timeout=30)
        if len(msgs) < 1:
            print('alert: No message to consume (also check timeouts)')
        else:
            for msg in msgs:
                print(
                    f"info: Message retrieved from {msg.topic()}: {msg.value().decode('utf-8')}"
                )
    except Exception as e:
        raise e
    c.close()
def kafka_consumer(self):
    props = {
        'bootstrap.servers': self.ip_port,  # Where is the Kafka cluster? (replace with the cluster to connect to)
        'group.id': 'goodgo',
        'auto.offset.reset': 'earliest',    # start from the earliest offset
    }
    # Step 2. Create a Kafka Consumer instance
    consumer = Consumer(props)
    # Step 3. Specify the topic name to subscribe to
    topicName = 'search_log'
    # Step 4. Have the Consumer subscribe to the topic on the Kafka cluster
    consumer.subscribe([topicName], on_assign=self.print_assignment, on_revoke=self.print_revoke)
    count = 0  # record count
    while True:
        records = consumer.consume(num_messages=500, timeout=1.0)  # batch read
        if len(records) > 0:
            count += 1
        for record in records:
            topic = record.topic()
            partition = record.partition()
            offset = record.offset()
            # Extract msgKey and msgValue
            msgKey = self.try_decode_utf8(record.key())
            msgValue = self.try_decode_utf8(record.value())
            # Print the metadata together with msgKey & msgValue
            print('%s-%d-%d : (%s , %s)' % (topic, partition, offset, msgKey, msgValue))
            savedata_els(http_ip_port='http://35.221.163.250:9200', count=count, msgValue=msgValue)
async def consume(topic_name): """Consumes data from the Kafka Topic """ c = Consumer( { "bootstrap.servers": BROKER_URL, "group.id": "sf_crimes" } ) c.subscribe([topic_name]) print(f"successfully subscribed to topic {topic_name}") curr_iteration = 0 # we use this to count ho wmany messages we have consumed while True: messages = c.consume(5, timeout = 1.0) # grab 5 messages for message in messages: if message is None: continue # No data received elif message.error() is not None: print(f"Received an error message from the consumer: Message {message.error()}") else: curr_iteration += 1 print(f"MSG ID {curr_iteration} : key: {message.key()}, value: {message.value()}") await asyncio.sleep(0.01)
class KConsumer:
    def __init__(self, group_id="Kdefault", brokers='localhost:32769',
                 session_timeout=10000, socket_timeout=1000):
        self.consumer_config = dict()
        self.consumer_config['bootstrap.servers'] = brokers
        self.consumer_config['group.id'] = group_id
        self.consumer_config['socket.timeout.ms'] = socket_timeout
        self.consumer_config['session.timeout.ms'] = session_timeout
        self.kc = None

    def create_session(self):
        try:
            self.kc = Consumer(self.consumer_config)
        except KafkaError:
            print(KafkaError)

    def consume(self, topic):
        self.kc.subscribe(topics=[topic])
        msg = self.kc.consume(num_messages=1, timeout=-1)
        # if msg.error:
        #     raise Exception(msg.error)
        # else:
        #     print(msg)
        # def lag_report(self):
        print(msg[0].value())
        print(len(msg))
        print(type(msg[0].error()))
async def consume(topic_name): """Consumes data from the Kafka Topic""" c = Consumer({"bootstrap.servers": BROKER_URL, "group.id": "0"}) c.subscribe([topic_name]) while True: # # TODO: Write a loop that uses consume to grab 5 messages at a time and has a timeout. # See: https://docs.confluent.io/current/clients/confluent-kafka-python/index.html?highlight=partition#confluent_kafka.Consumer.consume # messages = c.consume( 5, timeout=1.0 ) # consumes 5 messages at once. in case of poll its just 1 at a time # TODO: Print something to indicate how many messages you've consumed. Print the key and value of # any message(s) you consumed #message = c.poll(1.0) #<-- uncomment to make this work print(f"consumed {len(messages)} messages") for message in messages: if message is None: print(" no message received from consumer") elif message.error() is not None: print(f"error from consumer {message.error()}") else: print(f"consume message {message.key()}: {message.value()}") # Do not delete this! await asyncio.sleep(0.01)
class KafkaConsumer(BaseKafkaConsumer):
    def __init__(self, bootstrap_servers: List[str], topics: List[str],
                 group_id: str = 'tdm_ingestion', **kwargs):
        self.bootstrap_servers = ','.join(bootstrap_servers)
        self.topics = topics
        self.group_id = group_id
        params = {
            'bootstrap.servers': self.bootstrap_servers,
            'group.id': group_id
        }
        params.update(kwargs)
        logger.debug('creating consumer with params %s', params)
        self.consumer = ConfluentKafkaConsumer(params)
        self.consumer.subscribe(self.topics)
        logger.debug('subscribed to topics %s', topics)

    def poll(self, timeout_s: int = -1, max_records: int = 1) -> List[str]:
        try:
            messages = self.consumer.consume(max_records, timeout_s)
        except (RuntimeError, KafkaError):
            logger.debug("error consuming messages")
            return []
        return [m.value() for m in messages if m.error() is None]
async def consume(topic_name): """Consumes data from the Kafka Topic""" c = Consumer({ "bootstrap.servers": BROKER_URL, "group.id": "0" }) # Consumer groups consist of one or more consumers c.subscribe([topic_name]) logging.basicConfig(level=logging.DEBUG) while True: messages = c.consume(5, timeout=1.0) # To indicate how many messages have consumed. logging.info(f"consumed {len(messages)} messages") for message in messages: if message is None: logging.warning("no message received by consumer") elif message.error() is not None: logging.error(f"error from consumer {message.error()}") else: logging.info( f"consumed message {message.key()}: {message.value()}") # Do not delete this! await asyncio.sleep(0.01)
def test_can_read_metrics(metrics_consumer: Consumer) -> None:
    msgs = metrics_consumer.consume(num_messages=1000, timeout=10)
    assert len(msgs) == 1000
    for msg in msgs:
        assert msg is not None
        assert msg.error() is None
class LogReader:
    def __init__(self, bootstrap):
        self.bootstrap = bootstrap
        self.consumer = None
        self.stream = None

    def init(self, group, topic, partition):
        self.consumer = Consumer({
            "bootstrap.servers": self.bootstrap,
            "group.id": group,
            "enable.auto.commit": False,
            "auto.offset.reset": "earliest",
            "isolation.level": "read_committed"
        })
        self.consumer.assign(
            [TopicPartition(topic, partition, OFFSET_BEGINNING)])
        self.stream = self.stream_gen()

    def stream_gen(self):
        while True:
            msgs = self.consumer.consume(timeout=10)
            for msg in msgs:
                yield msg

    def read_until(self, check, timeout_s):
        begin = time.time()
        while True:
            if time.time() - begin > timeout_s:
                raise KafkaException(KafkaError(KafkaError._TIMED_OUT))
            for msg in self.stream:
                offset = msg.offset()
                value = msg.value().decode('utf-8')
                key = msg.key().decode('utf-8')
                if check(offset, key, value):
                    return
def consume_everything(topic):
    consumer = Consumer({
        "bootstrap.servers": "localhost:9092",
        "group.id": uuid.uuid4()
    })
    topicpart = TopicPartition(topic, 0, 0)
    consumer.assign([topicpart])
    low, high = consumer.get_watermark_offsets(topicpart)

    return consumer.consume(high - 1)
def poll_everything(topic):
    consumer = Consumer({
        'bootstrap.servers': 'localhost:9092',
        'group.id': uuid.uuid4()
    })
    topicpart = TopicPartition(topic, 0, 0)
    consumer.assign([topicpart])
    low, high = consumer.get_watermark_offsets(topicpart)

    return consumer.consume(high - 1)
async def consume(topic_name): """Consumes data from the Kafka Topic""" c = Consumer({"bootstrap.servers": BROKER_URL, "group.id": "0"}) c.subscribe([topic_name]) while True: messages = c.consume(5, timeout=1.0) print(f"consumed {len(messages)} messages") for message in messages: print(f"consume message {message.key()}: {message.value()}") await asyncio.sleep(0.01)
async def consume(topic_name):
    c = Consumer({'bootstrap.servers': BROKER_URL, 'group.id': 0})
    c.subscribe([topic_name])

    while True:
        messages = c.consume(5, timeout=1.0)
        print(f"consumed {len(messages)} messages")
        for message in messages:
            print(f'consumed message: {message.value()}')
        await asyncio.sleep(0.01)
def kafkaconsumer(server=server, groupid='conseumer', topic='test_request1', ID='User_ID'):
    global consumer

    def try_decode_utf8(data):
        if data:
            return data.decode('utf-8')
        else:
            return None

    def my_assign(consumer_instance, partitions):
        for p in partitions:
            p.offset = 0
        consumer_instance.assign(partitions)

    def error_cb(err):
        pass

    props = {
        'bootstrap.servers': server,
        'group.id': groupid,
        'auto.offset.reset': 'earliest',
        'session.timeout.ms': 6000,
        'error_cb': error_cb
    }
    return_answer = {}
    if consumer is None:
        consumer = Consumer(props)
    topicName = topic
    consumer.subscribe([topicName])

    records = []
    while len(records) == 0:
        records = consumer.consume(num_messages=1)
        if records is None:
            continue
        for record in records:
            if record is None:
                continue
            if record.error():
                continue
            else:
                msgKey = try_decode_utf8(record.key())
                msgValue = try_decode_utf8(record.value())
                if str(msgKey) != ID:
                    records = []
                else:
                    return_answer[msgKey] = msgValue
    # consumer.close()
    return return_answer
def messages(self):
    config = {
        'bootstrap.servers': self.bootstrap_servers,
        "group.id": self.consumer_group,
        'enable.auto.commit': True,
        "fetch.wait.max.ms": 3000,
        "max.poll.interval.ms": 60000,
        'session.timeout.ms': 60000,
        "on_commit": self._on_send_response,
        "default.topic.config": {
            "auto.offset.reset": "latest"
        }
    }
    if self.k_user and self.k_password:
        config['security.protocol'] = 'SASL_PLAINTEXT'
        config['sasl.mechanism'] = 'SCRAM-SHA-256'
        config['sasl.username'] = self.k_user
        config['sasl.password'] = self.k_password

    consumer = Consumer(config)

    if self.from_end:
        offset = OFFSET_END
    elif self.from_stored:
        offset = OFFSET_STORED
    elif self.from_beginning:
        offset = OFFSET_BEGINNING
    elif self.from_invalid:
        offset = OFFSET_INVALID
    # offset = OFFSET_END if self.from_end else OFFSET_BEGINNING

    pt = TopicPartition(self.topic, 0, offset)
    consumer.assign([pt])
    # consumer.seek(pt)

    try:
        while True:
            ret = consumer.consume(num_messages=100, timeout=0.1)
            if ret is None:
                print("No message Continue!")
                continue
            for msg in ret:
                if msg.error() is None:
                    # protobuf binary
                    yield msg.value()
                elif msg.error():
                    if msg.error().code() == KafkaError._PARTITION_EOF:
                        continue
                    else:
                        raise Exception(msg.error())
    except Exception as e:
        print(e)
        consumer.close()
    except KeyboardInterrupt:
        consumer.close()
async def _consume(topic_name): """Consumes produced messages""" c = Consumer({"bootstrap.servers": BROKER_URL, "group.id": "0"}) c.subscribe([topic_name]) num_consumed = 0 while True: msg = c.consume(timeout=0.001) if msg: num_consumed += 1 if num_consumed % 100 == 0: print(f"consumed {num_consumed} messages") else: await asyncio.sleep(0.01)
def consume(topic_name): """Consumes data from the Kafka Topic""" c = Consumer({"bootstrap.servers": BROKER_URL, "group.id": "0"}) c.subscribe([topic_name]) while True: messages = c.consume(5, timeout=1.0) print(f"consumed {len(messages)} messages") ii = 0 for ii, message in enumerate(messages): print(f"consume message {message.key()}: {message.value()}") if ii > 0: print('\n')
async def consume(topic_name): c = Consumer({"bootstrap.servers": "localhost:19092", "group.id": "0"}) c.subscribe(["com.luka.project2"]) while True: messages = c.consume(5, timeout=1.0) for msg in messages: if msg is None: print('no msg') elif msg.error() in not None: print(f'Error: {msg.error()}') else: print(f'{msg.value()}\n')
class ConsumerSpout(Spout):
    outputs = ['sentences']

    def initialize(self, stormconf, context):
        self.consumer = Consumer({
            'bootstrap.servers': 'kafka:9092',
            'group.id': 'mygroup',
            'auto.offset.reset': 'latest',
        })
        self.consumer.subscribe(['twitter'])

    def next_tuple(self):
        msg = self.consumer.consume(num_messages=100, timeout=10)
        self.emit([[m.value().decode('utf-8') for m in msg]])
async def consume():
    consumer = Consumer({
        'bootstrap.servers': 'PLAINTEXT://localhost:9097',
        'group.id': 'test-consumer',
    })
    consumer.subscribe(['police.calls'])

    while True:
        for message in consumer.consume():
            if message is not None:
                print(f'{message.value()}\n')
        await asyncio.sleep(1)
async def consume(topic_name): """Consumes data from the Kafka Topic""" c = Consumer({"bootstrap.servers": BROKER_URL, "group.id": "0"}) c.subscribe([topic_name]) while True: messages = c.consume(10, 1.0) for message in messages: if message is None: print("no message received by consumer") elif message.error() is not None: print(f"error from consumer {message.error()}") else: print(f"consumed message {message.value()}") await asyncio.sleep(0.01)
def test_basic_api(): """ Basic API tests, these wont really do anything since there is no broker configured. """ try: kc = Consumer() except TypeError as e: assert str(e) == "expected configuration dict" def dummy_commit_cb(err, partitions): pass kc = Consumer({'group.id': 'test', 'socket.timeout.ms': '100', 'session.timeout.ms': 1000, # Avoid close() blocking too long 'on_commit': dummy_commit_cb}) kc.subscribe(["test"]) kc.unsubscribe() def dummy_assign_revoke(consumer, partitions): pass kc.subscribe(["test"], on_assign=dummy_assign_revoke, on_revoke=dummy_assign_revoke) kc.unsubscribe() msg = kc.poll(timeout=0.001) if msg is None: print('OK: poll() timeout') elif msg.error(): print('OK: consumer error: %s' % msg.error().str()) else: print('OK: consumed message') if msg is not None: assert msg.timestamp() == (TIMESTAMP_NOT_AVAILABLE, -1) msglist = kc.consume(num_messages=10, timeout=0.001) assert len(msglist) == 0, "expected 0 messages, not %d" % len(msglist) with pytest.raises(ValueError) as ex: kc.consume(-100) assert 'num_messages must be between 0 and 1000000 (1M)' == str(ex.value) with pytest.raises(ValueError) as ex: kc.consume(1000001) assert 'num_messages must be between 0 and 1000000 (1M)' == str(ex.value) partitions = list(map(lambda part: TopicPartition("test", part), range(0, 100, 3))) kc.assign(partitions) with pytest.raises(KafkaException) as ex: kc.seek(TopicPartition("test", 0, 123)) assert 'Erroneous state' in str(ex.value) # Verify assignment assignment = kc.assignment() assert partitions == assignment # Pause partitions kc.pause(partitions) # Resume partitions kc.resume(partitions) # Get cached watermarks, should all be invalid. lo, hi = kc.get_watermark_offsets(partitions[0], cached=True) assert lo == -1001 and hi == -1001 assert lo == OFFSET_INVALID and hi == OFFSET_INVALID # Query broker for watermarks, should raise an exception. try: lo, hi = kc.get_watermark_offsets(partitions[0], timeout=0.5, cached=False) except KafkaException as e: assert e.args[0].code() in (KafkaError._TIMED_OUT, KafkaError._WAIT_COORD, KafkaError.LEADER_NOT_AVAILABLE),\ str(e.args([0])) kc.unassign() kc.commit(asynchronous=True) try: kc.commit(asynchronous=False) except KafkaException as e: assert e.args[0].code() in (KafkaError._TIMED_OUT, KafkaError._NO_OFFSET) # Get current position, should all be invalid. kc.position(partitions) assert len([p for p in partitions if p.offset == OFFSET_INVALID]) == len(partitions) try: kc.committed(partitions, timeout=0.001) except KafkaException as e: assert e.args[0].code() == KafkaError._TIMED_OUT try: kc.list_topics(timeout=0.2) except KafkaException as e: assert e.args[0].code() in (KafkaError._TIMED_OUT, KafkaError._TRANSPORT) try: kc.list_topics(topic="hi", timeout=0.1) except KafkaException as e: assert e.args[0].code() in (KafkaError._TIMED_OUT, KafkaError._TRANSPORT) kc.close()