Example #1
def randomly_generated_topics(confluent_admin_client: AdminClient,
                              prefix="") -> str:
    topic_id = prefix + "".join(random.choices(ascii_letters, k=5))
    future: Future = confluent_admin_client.create_topics(
        [NewTopic(topic_id, num_partitions=1, replication_factor=1)])[topic_id]
    # Block until the creation future completes; result() returns None on
    # success and raises KafkaException on failure.
    while not (future.done() or future.cancelled()):
        if future.result():
            raise RuntimeError
    return topic_id
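The futures returned by create_topics() are concurrent.futures.Future instances, so the polling loop above could equally be replaced by a single blocking result() call with a timeout. A minimal sketch, not part of the original fixture, reusing topic_id and the admin client from above:

    future = confluent_admin_client.create_topics(
        [NewTopic(topic_id, num_partitions=1, replication_factor=1)])[topic_id]
    future.result(timeout=30)  # None on success, raises KafkaException on failure or TimeoutError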
Example #2
def topic_and_partitions(
        request, confluent_admin_client: AdminClient,
        running_cluster_config: Dict[str, str]) -> Iterable[Tuple[str, int]]:
    """
    Creates a Kafka topic whose name is a random 5-character string, partitioned
    into 1, 2 or 4 partitions. Then it yields the tuple (topic, n_partitions).

    Prints topic information before and after the topic was used by a test.
    :return: Topic and number of partitions within it.
    """
    topic_id = rand_text(5)
    partitions = request.param

    confluent_admin_client.create_topics(
        [NewTopic(topic_id, num_partitions=partitions, replication_factor=1)])

    yield topic_id, partitions

    # delete_topics() expects topic name strings rather than NewTopic objects.
    confluent_admin_client.delete_topics([topic_id])
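delete_topics() also returns a dict of futures keyed by topic name, so the teardown could additionally block until the deletion is acknowledged. A sketch of that extension, not part of the original fixture:

    futures = confluent_admin_client.delete_topics([topic_id], operation_timeout=30)
    futures[topic_id].result()  # None on success, raises KafkaException on failure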
Example #3
def create_topic():
    """Creates the producer topic if it does not already exist"""
    # Creates the topic
    logger.info("Creating topic %s", TOPIC)
    try:
        client = AdminClient({'bootstrap.servers': BROKER_URL})
        topic = NewTopic(TOPIC, num_partitions=1, replication_factor=1)
        client.create_topics([topic])
        logger.info("Topic created successfully")
    except Exception as e:
        logger.error("failed to create topic %s, error : %s", TOPIC, e)
        raise
Example #4
    def test_002_test_create_topic(self):

        new_topics = [NewTopic("testrun", 1, 1)]

        result_futures = self.admin_client.create_topics(new_topics,
                                                         request_timeout=15.0)

        for topic, f in result_futures.items():
            try:
                f.result()  # The result itself is None
                self.assertTrue(True)
            except Exception as e:
                assert_that(str(e), contains_string("already exists."))
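Matching on the error message string is fragile; the same "already exists" case can also be detected by error code. A sketch of the alternative assertion, using the standard confluent_kafka exception types:

from confluent_kafka import KafkaError, KafkaException

try:
    f.result()
except KafkaException as e:
    assert e.args[0].code() == KafkaError.TOPIC_ALREADY_EXISTS  # args[0] is the KafkaError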
Example #5
 def create_topics(self, topics: List[Topic]):
     for topic in topics:
         partitions = topic.num_partitions if topic.num_partitions is not None else self.config.default_partitions
         replicas = (topic.replication_factor
                     if topic.replication_factor is not None else
                     self.config.default_replication_factor)
         new_topic = NewTopic(topic.name,
                              num_partitions=partitions,
                              replication_factor=replicas,
                              config=topic.config)
         future_list = self.cluster.confluent_client.create_topics(
             [new_topic])
         ensure_kafka_futures_done(list(future_list.values()))
Example #6
 def _assign_consumer_to_last_offset(self):
     off_topic = self.config["offset_topic"]
     partition = TopicPartition(off_topic, 0)
     try:
         _, high_offset = self._offset_consumer.get_watermark_offsets(
             partition, timeout=10)
     except KafkaException:
         logger.warning(
             f"Offset topic {off_topic} was not found, creating it now.")
         self._admin.create_topics(
             [NewTopic(off_topic, num_partitions=1, replication_factor=1)],
             operation_timeout=120)
         high_offset = 0
     partition.offset = max(0, high_offset - 1)
     self._offset_consumer.assign([partition])
Example #7
    def test_003_create_topic_with_config(self):
        config = {
            # NewTopic config is documented as a dict of str -> str values.
            "delete.retention.ms": "3600",
            "retention.bytes": "10000",
            "retention.ms": "3600",
        }
        new_topics = [NewTopic("test_config", 1, 1, config=config)]

        result_futures = self.admin_client.create_topics(new_topics=new_topics)

        for topic, f in result_futures.items():
            try:
                f.result()  # The result itself is None
                self.assertTrue(True)
            except Exception as e:
                assert_that(str(e), contains_string("already exists."))
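To check that the overrides were actually applied, the test could read the topic configuration back with describe_configs(). A sketch assuming the same admin_client and topic name:

from confluent_kafka.admin import ConfigResource

resource = ConfigResource(ConfigResource.Type.TOPIC, "test_config")
entries = self.admin_client.describe_configs([resource])[resource].result()
assert entries["retention.ms"].value == "3600"  # ConfigEntry values are strings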
Example #8
        def create_topic(a: AdminClient,
                         topic: str,
                         num_partitions: int,
                         replication_factor=1):
            new_topic = [
                NewTopic(topic,
                         num_partitions=num_partitions,
                         replication_factor=replication_factor)
            ]
            fs = a.create_topics(new_topic)

            for topic, f in fs.items():
                try:
                    f.result()
                    print("Topic {} created".format(topic))
                except Exception as e:
                    print("Failed to create topic {}: {}".format(topic, e))
Example #9
 def create_topics(self, topics: List[Topic]):
     for topic in topics:
         partitions = (topic.num_partitions if topic.num_partitions
                       is not None else self.config.default_num_partitions)
         replicas = (topic.replication_factor
                     if topic.replication_factor is not None else
                     self.config.default_replication_factor)
         new_topic = NewTopic(topic.name,
                              num_partitions=partitions,
                              replication_factor=replicas,
                              config=topic.config)
         future_list = self.cluster.confluent_client.create_topics(
             [new_topic], operation_timeout=60)
         ensure_kafka_future_done(next(islice(future_list.values(), 1)))
         for _ in range(80):
             topic_data = self.cluster.confluent_client.list_topics(
                 topic=topic.name).topics[topic.name]
             if topic_data.error is None:
                 break
             time.sleep(0.125)
         else:
             raise RuntimeError(f"Couldn't create topic {topic}")
Example #10
def create_topic(client: AdminClient,
                 name: str,
                 partitions: int,
                 replication_factor: int = -1):
    nt = NewTopic(
        topic=name,
        num_partitions=partitions,
        replication_factor=replication_factor,
    )

    # Call create_topics to asynchronously create topics, a dict
    # of <topic,future> is returned.
    fs = client.create_topics(new_topics=[nt])

    # Wait for operation to finish.
    # Timeouts are preferably controlled by passing request_timeout=15.0
    # to the create_topics() call.
    # All futures will finish at the same time.
    for topic, f in fs.items():
        try:
            f.result()  # The result itself is None
            print("Topic {} created".format(topic))
        except Exception as e:
            print("Failed to create topic {}: {}".format(topic, e))
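A possible call site for this helper (the broker address and topic name are assumptions for illustration):

admin = AdminClient({"bootstrap.servers": "localhost:9092"})
create_topic(admin, "example-topic", partitions=3)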
Example #11
 def to_new_topic(self):
     return NewTopic(self.name,
                     num_partitions=self.partitions,
                     replication_factor=self.replication)
Example #12
from confluent_kafka.admin import AdminClient
import argparse
import uuid
from confluent_kafka.cimpl import NewTopic

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="create kafka topic. Default partitions is 1")
    parser.add_argument("-t", "--topic", help="the topic to create")
    parser.add_argument("-b",
                        "--broker",
                        help="the broker to create the topics on")
    parser.add_argument("-p", "--partitions", type=int, default=1)
    args = parser.parse_args()

    broker = args.broker
    conf = {"bootstrap.servers": broker}
    admin_client = AdminClient(conf)

    conf["group.id"] = str(uuid.uuid4())
    topic = args.topic
    partitions = args.partitions
    new_topic = NewTopic(topic, num_partitions=partitions)
    admin_client.create_topics([new_topic])
    admin_client.poll(10)
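Instead of the bare poll(), the script could block on the future returned for the topic, which also surfaces any creation error. A sketch, not part of the original script:

    futures = admin_client.create_topics([new_topic])
    futures[topic].result()  # None on success, raises KafkaException on failure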
Example #13
def create_new_topic(topic_name: str, client: AdminClient):
    dummy_topic = NewTopic(topic_name, 1, replication_factor=1)
    topic_structure = client.create_topics([dummy_topic])
    for topic, future in topic_structure.items():
        future.result()
Example #14
]

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=
        "create topics if they don't exist using the output of CS:INSTLIST")
    parser.add_argument("filename")
    parser.add_argument("--broker", help="the broker to create the topics on")
    args = parser.parse_args()

    broker = args.broker
    conf = {"bootstrap.servers": broker}
    admin_client = AdminClient(conf)

    conf["group.id"] = str(uuid.uuid4())
    cons = Consumer(conf)
    topics = cons.list_topics()
    topics_list = topics.topics

    with open(args.filename) as file:
        instruments = json.load(file)  # avoid shadowing the json module
    for item in instruments:
        inst_name = item["name"]
        for topic_suffix in TOPICS_PER_INST:
            topic_to_check = inst_name + topic_suffix
            if topic_to_check not in topics_list:
                print(f"creating {topic_to_check}")
                new_topic = NewTopic(topic_to_check, num_partitions=1)
                admin_client.create_topics([new_topic])
    admin_client.poll(10)
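The Consumer here is only needed to fetch metadata; AdminClient exposes the same lookup, so the topic listing could be obtained directly. A sketch, reusing the admin_client defined above:

    # .topics is a dict of {topic_name: TopicMetadata}, so the membership
    # check "topic_to_check not in topics_list" works unchanged.
    topics_list = admin_client.list_topics(timeout=10).topics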
Example #15
import random

from confluent_kafka import Producer
from confluent_kafka.admin import AdminClient
from confluent_kafka.cimpl import NewTopic

if __name__ == '__main__':

    topic = 'temperature'

    admin_client = AdminClient({
        'bootstrap.servers':
        'localhost:32768,localhost:32769,localhost:32770'
    })

    admin_client.create_topics(
        [NewTopic(topic, num_partitions=3, replication_factor=1)])

    # Create Producer instance
    producer = Producer({
        # Exercise: Add your producer configuration here
    })

    delivered_records = 0

    # Optional per-message on_delivery handler (triggered by poll() or flush())
    # when a message has been successfully delivered or
    # permanently failed delivery (after retries).
    def acked(err, msg):
        global delivered_records
        """Delivery report handler called on
        successful or failed delivery of message
Example #16
 def create_topic(self):
     """Creates the producer topic if it does not already exist"""
     new_topic = NewTopic(self.topic_name,
                          num_partitions=self.num_partitions,
                          replication_factor=self.num_replicas)
     topic_creation = BaseProducer.admin_client.create_topics([new_topic])
     return topic_creation
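Because the method returns the raw {topic: future} dict from create_topics(), a caller that needs to confirm creation can resolve the futures. A sketch (the producer instance name is hypothetical):

for name, future in my_producer.create_topic().items():
    future.result()  # blocks until the topic is created, raises KafkaException on failure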