def trustedPeerManager(path, broker_config, topic):
    """Advertise this node's enode on a Kafka topic and add every other
    enode seen on that topic as a trusted peer via the local IPC endpoint.

    Blocks forever consuming the registration topic.
    """
    backend = IPCBackend(path)

    admin = kafka.KafkaAdminClient(**broker_config)
    try:
        # Create the topic explicitly: auto-creation would default to snappy
        # compression, which Python does not natively support and we don't
        # want to mess with setting up.
        admin.create_topics([
            kafka.admin.NewTopic(
                topic, 1, 3, topic_configs={"compression.type": "gzip"})
        ])
    except kafka.errors.TopicAlreadyExistsError:
        pass

    consumer = kafka.KafkaConsumer(
        topic, auto_offset_reset='earliest', **broker_config)
    producer = kafka.KafkaProducer(**broker_config)

    # Rebuild the advertised enode URL around this host's resolved address.
    local_ip = socket.gethostbyname(socket.gethostname())
    node_info = backend.get("admin_nodeInfo")["result"]
    self_enode = "%s@%s:30303" % (node_info["enode"].split("@")[0], local_ip)

    producer.send(topic, self_enode.encode("utf8"))
    logger.info("Registered with %s as %s", topic, self_enode)

    known_peers = {self_enode}
    for record in consumer:
        candidate = record.value.decode("utf8")
        if candidate in known_peers:
            continue
        backend.get("admin_addTrustedPeer", [candidate])
        backend.get("admin_addPeer", [candidate])
        logger.info("Added %s as trusted peer", candidate)
        known_peers.add(candidate)
def __init__(self, connection_info, advanced_info, topic_in, topic_out, predictor, _type):
    """Wire up the Kafka consumer/producer pair for a prediction stream and
    start the worker thread target matching the stream type.
    """
    self.connection_info = connection_info
    self.advanced_info = advanced_info
    self.predictor = predictor
    self.stream_in_name = topic_in
    self.stream_out_name = topic_out

    # Consumer reads the input stream; extra options come from advanced_info.
    consumer_opts = self.advanced_info.get('consumer', {})
    self.consumer = kafka.KafkaConsumer(**self.connection_info, **consumer_opts)
    self.consumer.subscribe(topics=[self.stream_in_name])

    producer_opts = self.advanced_info.get('producer', {})
    self.producer = kafka.KafkaProducer(**self.connection_info, **producer_opts)

    # Make sure the output topic exists (single partition, unreplicated).
    self.admin = kafka.KafkaAdminClient(**self.connection_info)
    try:
        self.topic = NewTopic(self.stream_out_name,
                              num_partitions=1,
                              replication_factor=1)
        self.admin.create_topics([self.topic])
    except kafka.errors.TopicAlreadyExistsError:
        pass

    self._type = _type
    self.native_interface = NativeInterface()
    self.format_flag = 'explain'
    self.stop_event = Event()
    self.company_id = os.environ.get('MINDSDB_COMPANY_ID', None)
    self.caches = {}

    # Pick the thread target based on the stream type.
    worker = (KafkaStream.make_timeseries_predictions
              if self._type == 'timeseries'
              else KafkaStream.make_prediction)
    super().__init__(target=worker, args=(self, ))
def create_kafka_consumer(self):
    """Create the Kafka consumer and admin client, then ensure all request
    topics exist.

    Every step retries forever with a short sleep until the broker becomes
    available, so this call blocks until Kafka is reachable.
    """
    # Create kafka consumer. Wait until kafka broker becomes available if
    # necessary.
    # FIX: this retry loop was commented out in the original, which made the
    # sleep-and-retry except branch dead code and let the method fall through
    # with self._consumer still None after a single failure.
    while self._consumer is None:
        try:
            logger.info(
                'Kafka: starting a consumer for topics {}..'.format(
                    KAFKA_REQUEST_TOPICS))
            if self._consumer_timeout_ms is not None:
                # TODO improve kafka initialization.
                self._consumer = kafka.KafkaConsumer(
                    *self._topics,
                    bootstrap_servers=self._bootstrap_servers,
                    consumer_timeout_ms=self._consumer_timeout_ms)
            else:
                self._consumer = kafka.KafkaConsumer(
                    *self._topics,
                    bootstrap_servers=self._bootstrap_servers)
        # FIX: bare `except:` would also swallow KeyboardInterrupt/SystemExit,
        # making the retry loops unkillable; narrow to Exception.
        except Exception:
            logger.info("Failed to create kafka consumer. Error {}".format(
                sys.exc_info()[0]))
            logger.info("Waiting 5s before retrying..")
            time.sleep(5)
    logger.info("Consumer Created.")

    # Retry until the admin client connects as well.
    while self._adminclient is None:
        try:
            self._adminclient = kafka.KafkaAdminClient(
                bootstrap_servers=self._bootstrap_servers)
        except Exception:
            logger.info(
                "Failed to create kafka admin client. Error {}".format(
                    sys.exc_info()[0]))
            logger.info("Waiting 5s before retrying..")
            time.sleep(5)

    logger.info('Kafka: creating topics..')
    # Poll until every topic reports partitions; create missing ones and
    # re-check on the next pass.
    while self._topics_ready is False:
        try:
            self._topics_ready = True
            for topic in self._topics:
                topic_partitions = self._consumer.partitions_for_topic(
                    topic)
                if topic_partitions is None:
                    self._adminclient.create_topics([
                        kafka.admin.NewTopic(
                            topic,
                            num_partitions=KAFKA_CONSUMER_PARTITIONS,
                            replication_factor=KAFKA_CONSUMER_REPLICAS)
                    ])
                    self._topics_ready = False
                    time.sleep(2)
        except Exception:
            self._topics_ready = False
            logger.info("Failed to create kafka topics. Error {}".format(
                sys.exc_info()))
            logger.info("Waiting 2s before retrying..")
            time.sleep(2)
    self._adminclient.close()
    logger.info("Kafka: Topics Created.")
def main():
    """Print each topic's partition offset ranges and each consumer group's
    committed offsets for a Kafka cluster.

    Returns 0 on success (CLI exit code).
    """
    parser = argparse.ArgumentParser(
        description="Kafka client to get groups and topics status")
    parser.add_argument(
        "--server",
        type=str,
        metavar="HOST",
        default="localhost",
        help="Kafka bootstrap-server address",
    )
    parser.add_argument(
        "--port",
        type=int,
        metavar="PORT",
        default=9092,
        help="Kafka bootstrap-server port",
    )
    parser.add_argument(
        "--client",
        type=str,
        default="ch-kafka-python",
        help="custom client id for this producer",
    )
    args = parser.parse_args()

    config = {
        "bootstrap_servers": f"{args.server}:{args.port}",
        "client_id": args.client,
    }
    client = kafka.KafkaAdminClient(**config)
    consumer = kafka.KafkaConsumer(**config)
    # NOTE(review): reaches into kafka-python's private `_client` attribute;
    # there is no public cluster-metadata accessor for this.
    cluster = client._client.cluster
    topics = cluster.topics()
    for topic in topics:
        print(f'Topic "{topic}":', end="")
        for partition in cluster.partitions_for_topic(topic):
            tp = kafka.TopicPartition(topic, partition)
            print(
                f" {partition} (begin: {consumer.beginning_offsets([tp])[tp]}, end: {consumer.end_offsets([tp])[tp]})",
                end="",
            )
        print()
    # FIX: close the metadata consumer before the name is rebound in the
    # group loop below — the original leaked this connection.
    consumer.close()

    groups = client.list_consumer_groups()
    for group in groups:
        print(f'Group "{group[0]}" ({group[1]}):')
        consumer = kafka.KafkaConsumer(**config, group_id=group[0])
        offsets = client.list_consumer_group_offsets(group[0])
        for topic, offset in offsets.items():
            print(
                f"\t{topic.topic}[{topic.partition}]: {consumer.beginning_offsets([topic])[topic]}, {offset.offset}, {consumer.end_offsets([topic])[topic]}"
            )
        consumer.close()
    client.close()
    return 0
def clean_environment(conf: Config):
    """Best-effort deletion of the configured topic from the Kafka cluster.

    Lists all topics (for visibility in the logs), then deletes
    ``conf["topic"]``, logging a warning instead of raising if the delete
    fails (e.g. the topic does not exist).
    """
    log.info("Connecting to: %s", conf["brokers"])
    admin = k.KafkaAdminClient(bootstrap_servers=[conf["brokers"]])
    try:
        for topic in admin.list_topics():
            log.info("Found topic: %s", topic)
        log.debug("Deleting %s topic", conf["topic"])
        try:
            admin.delete_topics([conf["topic"]])
        except Exception as e:
            # FIX: the caught exception was previously discarded; include it
            # so a real broker error isn't mistaken for a missing topic.
            log.warning("topic %s doesn't exist (%s)", conf["topic"], e)
    finally:
        # FIX: guarantee the admin connection is closed even if listing or
        # deletion raises unexpectedly.
        admin.close()
def main():
    """Create or delete Kafka topics from the command line.

    Returns 0 on success (CLI exit code).
    """
    parser = argparse.ArgumentParser(description="Kafka Topic manager")
    parser.add_argument(
        "--server",
        type=str,
        metavar="HOST",
        default="localhost",
        help="Kafka bootstrap-server address",
    )
    parser.add_argument(
        "--port",
        type=int,
        metavar="PORT",
        default=9092,
        help="Kafka bootstrap-server port",
    )
    parser.add_argument(
        "--client",
        type=str,
        default="ch-kafka-python",
        help="custom client id for this producer",
    )
    commands = parser.add_mutually_exclusive_group()
    commands.add_argument(
        "--create",
        type=str,
        metavar="TOPIC",
        nargs="+",
        help="create new topic(s) in the cluster",
    )
    commands.add_argument(
        "--delete",
        type=str,
        metavar="TOPIC",
        nargs="+",
        help="delete existing topic(s) from the cluster",
    )
    args = parser.parse_args()

    config = {
        "bootstrap_servers": f"{args.server}:{args.port}",
        "client_id": args.client,
    }
    client = kafka.KafkaAdminClient(**config)
    try:
        if args.create:
            # FIX: KafkaAdminClient.create_topics() requires NewTopic objects;
            # the original passed bare topic-name strings, which fails inside
            # kafka-python.
            new_topics = [
                kafka.admin.NewTopic(
                    name, num_partitions=1, replication_factor=1)
                for name in args.create
            ]
            print(client.create_topics(new_topics))
        elif args.delete:
            # delete_topics() does accept plain topic names.
            print(client.delete_topics(args.delete))
    finally:
        client.close()
    return 0
def main():
    """Print each topic's partition offset ranges and each consumer group's
    committed offsets for a Kafka cluster.

    Returns 0 on success (CLI exit code).
    """
    parser = argparse.ArgumentParser(
        description='Kafka client to get groups and topics status')
    parser.add_argument('--server',
                        type=str,
                        metavar='HOST',
                        default='localhost',
                        help='Kafka bootstrap-server address')
    parser.add_argument('--port',
                        type=int,
                        metavar='PORT',
                        default=9092,
                        help='Kafka bootstrap-server port')
    parser.add_argument('--client',
                        type=str,
                        default='ch-kafka-python',
                        help='custom client id for this producer')
    args = parser.parse_args()

    config = {
        'bootstrap_servers': f'{args.server}:{args.port}',
        'client_id': args.client,
    }
    client = kafka.KafkaAdminClient(**config)
    consumer = kafka.KafkaConsumer(**config)
    # NOTE(review): reaches into kafka-python's private `_client` attribute;
    # there is no public cluster-metadata accessor for this.
    cluster = client._client.cluster
    topics = cluster.topics()
    for topic in topics:
        print(f'Topic "{topic}":', end='')
        for partition in cluster.partitions_for_topic(topic):
            tp = kafka.TopicPartition(topic, partition)
            print(
                f' {partition} (begin: {consumer.beginning_offsets([tp])[tp]}, end: {consumer.end_offsets([tp])[tp]})',
                end='')
        print()
    # FIX: close the metadata consumer before the name is rebound in the
    # group loop below — the original leaked this connection.
    consumer.close()

    groups = client.list_consumer_groups()
    for group in groups:
        print(f'Group "{group[0]}" ({group[1]}):')
        consumer = kafka.KafkaConsumer(**config, group_id=group[0])
        offsets = client.list_consumer_group_offsets(group[0])
        for topic, offset in offsets.items():
            print(
                f'\t{topic.topic}[{topic.partition}]: {consumer.beginning_offsets([topic])[topic]}, {offset.offset}, {consumer.end_offsets([topic])[topic]}'
            )
        consumer.close()
    client.close()
    return 0
def setup_environment(conf: Config):
    """Create the configured topic on the Kafka cluster (idempotent).

    Uses ``conf["num_partitions"]`` partitions and a replication factor of 1.
    An already-existing topic is fine; any other failure is logged rather
    than raised (best effort, matching the original behavior).
    """
    admin = k.KafkaAdminClient(bootstrap_servers=conf["brokers"])
    log.debug("Creating %s topic", conf["topic"])
    try:
        admin.create_topics([
            kadmin.NewTopic(conf["topic"],
                            num_partitions=conf["num_partitions"],
                            replication_factor=1)
        ])
    except k.errors.TopicAlreadyExistsError:
        # Expected on re-runs: the topic is already provisioned.
        pass
    except Exception as e:
        # FIX: the original `except Exception: pass` hid every failure.
        # Stay best-effort, but record why the create did not happen.
        log.warning("could not create topic %s: %s", conf["topic"], e)
    finally:
        # FIX: guarantee the admin connection is closed on any path.
        admin.close()
def main():
    """Create or delete Kafka topics from the command line.

    Returns 0 on success (CLI exit code).
    """
    parser = argparse.ArgumentParser(description='Kafka Topic manager')
    parser.add_argument('--server',
                        type=str,
                        metavar='HOST',
                        default='localhost',
                        help='Kafka bootstrap-server address')
    parser.add_argument('--port',
                        type=int,
                        metavar='PORT',
                        default=9092,
                        help='Kafka bootstrap-server port')
    parser.add_argument('--client',
                        type=str,
                        default='ch-kafka-python',
                        help='custom client id for this producer')
    commands = parser.add_mutually_exclusive_group()
    commands.add_argument('--create',
                          type=str,
                          metavar='TOPIC',
                          nargs='+',
                          help='create new topic(s) in the cluster')
    commands.add_argument('--delete',
                          type=str,
                          metavar='TOPIC',
                          nargs='+',
                          help='delete existing topic(s) from the cluster')
    args = parser.parse_args()

    config = {
        'bootstrap_servers': f'{args.server}:{args.port}',
        'client_id': args.client,
    }
    client = kafka.KafkaAdminClient(**config)
    try:
        if args.create:
            # FIX: KafkaAdminClient.create_topics() requires NewTopic objects;
            # the original passed bare topic-name strings, which fails inside
            # kafka-python.
            new_topics = [
                kafka.admin.NewTopic(
                    name, num_partitions=1, replication_factor=1)
                for name in args.create
            ]
            print(client.create_topics(new_topics))
        elif args.delete:
            # delete_topics() does accept plain topic names.
            print(client.delete_topics(args.delete))
    finally:
        client.close()
    return 0
def connect_admin(
        servers: str,
        exception: bool = False) -> kafka.admin.client.KafkaAdminClient:
    """
    Connect to Kafka admin consul
    :args:
        servers:str - producer server
        exception:bool - whether or not to print exceptions
    :params:
        admin:kafka.admin.client.KafkaAdminClient - connection to Kafka admin
    :return:
        admin (None when the connection attempt fails)
    """
    admin = None
    try:
        admin = kafka.KafkaAdminClient(bootstrap_servers=servers)
    except Exception as e:
        if exception is True:
            print(
                f'Failed to connect to admin against servers {servers} (Error: {e})'
            )
    return admin
def kafka_python_client(self) -> kafka.KafkaAdminClient:
    """Build a KafkaAdminClient from this object's kafka-python config,
    allowing 30s for API version auto-detection."""
    options = self._config.create_kafka_python_config()
    return kafka.KafkaAdminClient(**options,
                                  api_version_auto_timeout_ms=30000)
def create_kafka_admin_client(**configs) -> kafka.KafkaAdminClient:
    """Instantiate a KafkaAdminClient with the given keyword configuration."""
    admin_client = kafka.KafkaAdminClient(**configs)
    return admin_client
# NOTE(review): this chunk begins mid-expression inside an async handler whose
# definition is outside the visible range, and then continues with what looks
# like module-level startup code (broker retry loop, topic creation, websocket
# server launch) on the same collapsed line. Left byte-identical: restructuring
# it safely requires seeing the enclosing definition.
# NOTE(review): if the admin-client retry loop below exhausts without ever
# breaking, `adminClient` would be unbound at the create_topics call — confirm
# against the full file.
producer_handler(websocket, path)) done, pending = await asyncio.wait( [consumer_task, producer_task], return_when=asyncio.FIRST_COMPLETED, ) for task in pending: task.cancel() while True: time.sleep(1) log.warning("Trying to connect to kafka") try: adminClient = kafka.KafkaAdminClient(bootstrap_servers=KAFKA_BROKERS) break except Exception as e: log.exception("Could not create admin client ", exc_info=e) try: adminClient.create_topics([NewTopic(KAFKA_TOPIC, 1, 1)]) except Exception as e: log.exception("Could not create topic ", exc_info=e) executor = ThreadPoolExecutor(max_workers=50) start_server = websockets.serve(chat, "0.0.0.0", WS_PORT) log.warning(f"Started ws server on port {WS_PORT}") asyncio.get_event_loop().run_until_complete(start_server)
def set_client_admin(self, name: str = "pyKafkaClient"):
    """Create the admin client against this object's endpoints and store it
    on ``self.clientAdmin`` (10s request timeout)."""
    options = {
        "bootstrap_servers": self.endpoints,
        "client_id": name,
        "request_timeout_ms": 10000,
    }
    self.clientAdmin = kafka.KafkaAdminClient(**options)
def admin(self, **kwargs):
    """Return a KafkaAdminClient built from the default options merged with
    any caller-supplied overrides."""
    self._expose()
    return kafka.KafkaAdminClient(**self._add_default_options(kwargs))
def _get_connection(self):
    """Open a new KafkaAdminClient using the stored connection parameters."""
    params = self.connection_params
    return kafka.KafkaAdminClient(**params)
def get_admin(**kwargs):
    """Create and return a KafkaAdminClient from the given keyword args."""
    logging.info("getting admin")
    admin_client = kafka.KafkaAdminClient(**kwargs)
    return admin_client
# NOTE(review): this chunk begins mid-call (the tail of a group.add_argument()
# whose opening is outside the visible range), so the argparse setup cannot be
# restructured from here. Left byte-identical. The remainder dispatches on
# args.list / args.add / args.delete against a broker at host (default
# 'localhost:19092'), using kafka.admin.NewTopic for creation — confirm the
# parser defines -list/-add/-delete and --host in the unseen portion.
help="Adds topic. Takes <name> <partitions> <replication factor>", nargs=3) group.add_argument("-delete", help="Deletes topic. Takes <Topic name>") args = parser.parse_args() host = 'localhost:19092' if args.host: host = args.host if args.list: # To consume latest messages and auto-commit offsets consumer = kafka.KafkaConsumer(bootstrap_servers=[host]) topics = consumer.topics() for topic in topics: print(topic) elif args.add: admin = kafka.KafkaAdminClient(bootstrap_servers=[host]) topics = [] topics.append( kafka.admin.NewTopic(name=args.add[0], num_partitions=int(args.add[1]), replication_factor=int(args.add[2]))) res = admin.create_topics(new_topics=topics) print(res) elif args.delete: admin = kafka.KafkaAdminClient(bootstrap_servers=[host]) topics = [] topics.append(args.delete) res = admin.delete_topics(topics) print(res)