Example #1
# Imports assumed for this snippet, which is a method taken from an agent-style check class
# (the 'dd-agent' client id suggests the Datadog Agent's Kafka check):
from kafka import KafkaClient  # kafka-python async client
from six import string_types
from datadog_checks.base import ConfigurationError  # assumption: the check framework's error type

DEFAULT_KAFKA_TIMEOUT = 5  # seconds; assumed module-level default

def _create_kafka_client(self):
    kafka_conn_str = self.instance.get('kafka_connect_str')
    if not isinstance(kafka_conn_str, (string_types, list)):
        raise ConfigurationError('kafka_connect_str should be a string or a list of strings')
    kafka_version = self.instance.get('kafka_client_api_version')
    if isinstance(kafka_version, str):
        kafka_version = tuple(map(int, kafka_version.split(".")))
    kafka_client = KafkaClient(
        bootstrap_servers=kafka_conn_str,
        client_id='dd-agent',
        request_timeout_ms=self.init_config.get('kafka_timeout', DEFAULT_KAFKA_TIMEOUT) * 1000,
        # If `kafka_client_api_version` is not set, kafka-python probes the cluster for the broker
        # version during bootstrapping. Probing picks a broker at random, so in a mixed-version
        # cluster the result is non-deterministic.
        api_version=kafka_version,
        # SSL params are passed through from the instance config; when absent they fall back to
        # kafka-python's defaults for plaintext connections.
        security_protocol=self.instance.get('security_protocol', 'PLAINTEXT'),
        sasl_mechanism=self.instance.get('sasl_mechanism'),
        sasl_plain_username=self.instance.get('sasl_plain_username'),
        sasl_plain_password=self.instance.get('sasl_plain_password'),
        sasl_kerberos_service_name=self.instance.get('sasl_kerberos_service_name', 'kafka'),
        sasl_kerberos_domain_name=self.instance.get('sasl_kerberos_domain_name'),
        ssl_cafile=self.instance.get('ssl_cafile'),
        ssl_check_hostname=self.instance.get('ssl_check_hostname', True),
        ssl_certfile=self.instance.get('ssl_certfile'),
        ssl_keyfile=self.instance.get('ssl_keyfile'),
        ssl_crlfile=self.instance.get('ssl_crlfile'),
        ssl_password=self.instance.get('ssl_password'),
    )
    # Force initial population of the local cluster metadata cache
    kafka_client.poll(future=kafka_client.cluster.request_update())
    if kafka_client.cluster.topics(exclude_internal_topics=False) is None:
        raise RuntimeError('Local cluster metadata cache did not populate.')
    return kafka_client
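
The api_version handling above converts a dotted version string from the config into the
integer tuple kafka-python expects; a quick standalone illustration (the value is hypothetical):

raw_version = "2.3.1"                              # as it might appear in the instance config
parsed = tuple(map(int, raw_version.split(".")))   # -> (2, 3, 1)
assert parsed == (2, 3, 1)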
Example #2

from time import sleep

from kafka import KafkaAdminClient, KafkaClient, KafkaProducer
from kafka.admin import NewTopic
from kafka.errors import KafkaError

topic_name = "creditcard"
topic_list = [
    NewTopic(
        name=topic_name,
        num_partitions=1,
        replication_factor=1,
        topic_configs={'retention.ms': '300000'},  # retain messages for 5 minutes
    )
]
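
Creating the topic from topic_list would look like the sketch below; the
TopicAlreadyExistsError handling and the close() call are additions, not part of
the original example:

from kafka.errors import TopicAlreadyExistsError

admin = KafkaAdminClient(bootstrap_servers=['localhost:9092'])
try:
    admin.create_topics(new_topics=topic_list, validate_only=False)
    print(f"created topic {topic_name}")
except TopicAlreadyExistsError:
    print(f"topic {topic_name} already exists")
finally:
    admin.close()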

# Retrieve the topics already on the broker, then delete the target topic if it exists

client = KafkaClient(bootstrap_servers=['localhost:9092'])
metadata = client.cluster
# Force a metadata refresh so the cached topic list is current
future = client.cluster.request_update()
client.poll(future=future)
broker_topics = metadata.topics()  # set of topic names known to the cluster

admin_client = KafkaAdminClient(bootstrap_servers=['localhost:9092'])
if topic_name in broker_topics:
    deletion = admin_client.delete_topics([topic_name])
    sleep(2)  # give the broker a moment to propagate the deletion
    try:
        # Refresh the cached metadata so it no longer lists the deleted topic
        future = client.cluster.request_update()
        client.poll(future=future)
    except KafkaError as e:
        print(e)
# Recreate the topic afterwards if desired:
# admin_client.create_topics(new_topics=topic_list, validate_only=False)
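
To confirm the deletion took effect, recent kafka-python releases let the admin client
list topic names directly (a sketch, not part of the original example):

remaining = admin_client.list_topics()
if topic_name not in remaining:
    print(f"topic {topic_name} deleted")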

producer = KafkaProducer(bootstrap_servers=['localhost:9092'])  # minimal construction; the source snippet was cut off mid-call
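
Sending a record with this producer might look like the following; the payload is a
hypothetical example, and the default serializer requires bytes:

producer.send(topic_name, b'{"card": "4111-xxxx", "amount": 12.5}')  # hypothetical payload
producer.flush()  # block until buffered records are actually sent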
Example #3

#!/usr/bin/python3

from kafka import KafkaClient

client = KafkaClient(bootstrap_servers='kafka1')  # port defaults to 9092
# Request a metadata update and block until it completes (same pattern as Example #1)
client.poll(future=client.cluster.request_update())
topics = list(client.cluster.topics(exclude_internal_topics=True))
print(topics)
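
If only the topic names are needed, kafka-python's KafkaConsumer offers a simpler route
(an alternative sketch, not from the original):

from kafka import KafkaConsumer

consumer = KafkaConsumer(bootstrap_servers='kafka1')
print(consumer.topics())  # set of non-internal topic names
consumer.close()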