Example #1
def target_topic_avro_consumer(unittest_config: Config, target_topic: Tuple[str, int]) -> AvroConsumer:
    consumer = AvroConsumer(
        {
            "group.id": "asdf",
            "enable.auto.commit": False,
            "enable.partition.eof": False,
            **unittest_config.create_confluent_config(include_schema_registry=True),
        }
    )
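    # Manually assign every partition of the target topic, reading from offset 0.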
    consumer.assign([TopicPartition(topic=target_topic[0], partition=i, offset=0) for i in range(target_topic[1])])
    yield consumer
    consumer.close()
Example #2
def test_confluent_config(config: Config):
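    # After switching to context_5, the generated confluent config should match exactly.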
    config.context_switch("context_5")
    expected_config = {
        "bootstrap.servers": "kafka:9094,kafka1:9094,kafka2:9094,kafka3:9094",
        "security.protocol": "SASL_SSL",
        "schema.registry.url": "http://schema-registry.example.com",
        "sasl.mechanisms": "PLAIN",
        "sasl.username": "******",
        "sasl.password": "******",
        "ssl.ca.location": "/my/ca.crt",
        "ssl.certificate.location": "/my/certificate.crt",
        "ssl.key.location": "/my/certificate.key",
        "ssl.key.password": "******",
    }

    actual_config = config.create_confluent_config(include_schema_registry=True)
    assert expected_config == actual_config
Example #3
class Cluster:
    def __init__(self):
        self._config = Config()
        self.confluent_client = AdminClient(
            self._config.create_confluent_config())
        self.pykafka_client = pykafka.client.KafkaClient(
            **self._config.create_pykafka_config(), broker_version="1.0.0")
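        # Serve pending callbacks once, presumably so connection problems surface early.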
        self.confluent_client.poll(timeout=1)
        self.__topic_controller = None

    @property
    def topic_controller(self) -> TopicController:
        if self.__topic_controller is None:
            self.__topic_controller = TopicController(self, self._config)
        return self.__topic_controller

    @property
    def bootstrap_servers(self):
        return self._config.bootstrap_servers

    def get_metadata(self):
        return self.confluent_client.list_topics(timeout=1)

    @property
    def brokers(self):
        metadata = self.confluent_client.list_topics(timeout=1)
        return sorted(
            [{
                "id": broker.id,
                "host": broker.host,
                "port": broker.port
            } for broker in metadata.brokers.values()],
            key=operator.itemgetter("id"),
        )

    def retrieve_config(self, config_type: ConfigResource.Type, id):
        requested_resources = [ConfigResource(config_type, str(id))]
        futures = self.confluent_client.describe_configs(requested_resources)
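        # describe_configs returns a {resource: future} mapping; unpack its single entry.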
        (old_resource, future), = futures.items()
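        # ensure_kafka_futures_done presumably blocks until the future has completed.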
        future = ensure_kafka_futures_done([future])
        result = future.result()
        return unpack_confluent_config(result)
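For context, a minimal usage sketch of the class above (hypothetical; it assumes a
populated Config and the surrounding project's imports):

cluster = Cluster()
print(cluster.bootstrap_servers)  # broker list from the active config context
for broker in cluster.brokers:  # broker metadata, sorted by id
    print(broker["id"], broker["host"], broker["port"])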
Example #4
def randomly_generated_consumer_groups(filled_topic,
                                       unittest_config: Config,
                                       prefix="") -> str:
    randomly_generated_consumer_group = prefix + "".join(
        random.choices(ascii_letters, k=8))
    _config = unittest_config.create_confluent_config()
    _config.update({
        "group.id": randomly_generated_consumer_group,
        "enable.auto.commit": False,
        "default.topic.config": {
            "auto.offset.reset": "latest"
        },
    })
    _consumer = confluent_kafka.Consumer(_config)
    _consumer.assign(
        [TopicPartition(topic=filled_topic.name, partition=0, offset=0)])
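    # Consume two messages and commit each synchronously so the group ends up
    # with committed offsets.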
    for _ in range(2):
        msg = _consumer.consume(timeout=10)[0]
        _consumer.commit(msg, asynchronous=False)
    _consumer.close()  # offsets were committed synchronously, so closing here is safe
    return randomly_generated_consumer_group
Example #5
def consumer(topic_object: Topic, consumer_group: str,
             unittest_config: Config):
    _config = unittest_config.create_confluent_config()
    _config.update({
        "group.id": consumer_group,
        "error_cb": log_error,
        # We need to commit offsets manually once we're sure it got saved
        # to the sink
        "enable.auto.commit": False,
        "enable.partition.eof": False,
        # Committed offsets take precedence; "latest" only kicks in the first
        # time the group subscribes and has no committed offset yet
        "default.topic.config": {
            "auto.offset.reset": "latest"
        },
    })
    _consumer = confluent_kafka.Consumer(_config)
    _consumer.assign(
        [TopicPartition(topic=topic_object.name, partition=0, offset=0)])
    yield _consumer
Example #6
def avro_producer(test_config: Config):
    producer_config = test_config.create_confluent_config()
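    # Note: the schema registry URL is taken from a fresh Config(), not from test_config.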
    producer_config.update({"schema.registry.url": Config().schema_registry})
    yield AvroProducer(producer_config)
Example #7
def producer(test_config: Config):
    producer_config = test_config.create_confluent_config()
    yield Producer(producer_config)
Example #8
def confluent_admin_client(test_config: Config) -> AdminClient:
    admin = AdminClient(test_config.create_confluent_config())
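    # poll() lets the client serve callbacks once before the test uses it.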
    admin.poll(timeout=5)
    yield admin
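Yield-based functions like the ones above are typically pytest fixtures (their
decorators are not shown here). A hedged sketch of how this last one might be
consumed in a test; the topic name is hypothetical:

def test_metadata_contains_topic(confluent_admin_client: AdminClient):
    metadata = confluent_admin_client.list_topics(timeout=5)
    assert "some-test-topic" in metadata.topics  # hypothetical topic name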