Example #1
def test_get_topics_with_prefix(
    non_interactive_cli_runner: CliRunner,
    topic_controller: TopicController,
    confluent_admin_client: confluent_kafka.admin.AdminClient,
):
    topic_base = "".join(random.choices(ascii_letters, k=5))
    prefix_1 = "ab"
    prefix_2 = "fx"
    new_topics = [
        prefix_1 + topic_base, prefix_2 + topic_base,
        prefix_1 + prefix_2 + topic_base
    ]
    topic_controller.create_topics(
        [Topic(new_topic, replication_factor=1) for new_topic in new_topics])
    confluent_admin_client.poll(timeout=1)

    result = non_interactive_cli_runner.invoke(get_topics,
                                               ["-p", prefix_1, "-o", "json"],
                                               catch_exceptions=False)

    assert result.exit_code == 0
    retrieved_topics = json.loads(result.output)
    assert len(retrieved_topics) > 1
    for retrieved_topic in retrieved_topics:
        assert retrieved_topic.startswith(prefix_1)
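
The -p option boils down to a prefix match over the topic list. An illustrative helper (a sketch, not the command's actual implementation; list_topics is the same call used in Example #13):

from typing import List

def topics_with_prefix(topic_controller: TopicController, prefix: str) -> List[str]:
    # Illustrative only: the prefix filtering that -p/--prefix performs.
    return [t.name for t in topic_controller.list_topics() if t.name.startswith(prefix)]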
Example #2
    def create_consumer_group_offset_change_plan(
        self,
        consumer_id: str,
        topic_name: str,
        offset_to_value: Optional[int],
        offset_by_delta: Optional[int],
        offset_to_timestamp: Optional[str],
        offset_from_group: Optional[str],
    ) -> Optional[List[ConsumerGroupOffsetPlan]]:

        consumer_group_state, offset_plans = self._read_current_consumergroup_offsets(
            consumer_id=consumer_id, topic_name_expression=topic_name)
        if consumer_group_state == "Dead":
            self._logger.error(
                "The consumer group {} does not exist.".format(consumer_id))
            return None
        elif consumer_group_state == "Empty":
            # "is not None" so that an explicit target offset of 0 is honoured
            if offset_to_value is not None:
                for plan_element in offset_plans.values():
                    (allowed_offset, error,
                     message) = self._select_new_offset_for_consumer(
                         offset_to_value, plan_element)
                    plan_element.proposed_offset = allowed_offset
                    if error:
                        self._logger.error(message)
            elif offset_by_delta is not None:
                for plan_element in offset_plans.values():
                    requested_offset = plan_element.current_offset + offset_by_delta
                    (allowed_offset, error,
                     message) = self._select_new_offset_for_consumer(
                         requested_offset, plan_element)
                    plan_element.proposed_offset = allowed_offset
                    if error:
                        self._logger.error(message)
            elif offset_to_timestamp:
                timestamp_limit = pendulum.parse(offset_to_timestamp)
                closest_offset_dict = TopicController(
                    self.cluster, None).get_offsets_closest_to_timestamp(
                        topic_name=topic_name, timestamp_limit=timestamp_limit)
                for plan_element in offset_plans.values():
                    # propose the offset closest to the requested timestamp
                    # (cf. _set_offset_to_timestamp in Example #7)
                    plan_element.proposed_offset = closest_offset_dict.get(
                        plan_element.partition_id, 0)
            elif offset_from_group:
                _, mirror_consumer_group = self._read_current_consumergroup_offsets(
                    consumer_id=offset_from_group,
                    topic_name_expression=topic_name)
                for key, value in mirror_consumer_group.items():
                    if key in offset_plans.keys():
                        offset_plans[
                            key].proposed_offset = value.current_offset
                    else:
                        value.current_offset = 0
                        offset_plans[key] = value
            return list(offset_plans.values())
        else:
            self._logger.error(
                "Consumer group {} is not empty. Use the {} option if you want to override this safety mechanism."
                .format(consumer_id, red_bold("--force")))
            return list(offset_plans.values())
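
_select_new_offset_for_consumer is not shown here; the (allowed_offset, error, message) tuples above suggest it clamps a requested offset into the partition's valid range. A hypothetical stand-alone sketch (the watermark-based logic is an assumption, not the real implementation):

from typing import Tuple

def select_new_offset_sketch(requested_offset: int, low_watermark: int,
                             high_watermark: int) -> Tuple[int, bool, str]:
    # Hypothetical: clamp the requested offset between the partition's
    # watermarks and flag an error when clamping was necessary.
    if requested_offset < low_watermark:
        return low_watermark, True, f"Offset {requested_offset} is below the low watermark {low_watermark}."
    if requested_offset > high_watermark:
        return high_watermark, True, f"Offset {requested_offset} is above the high watermark {high_watermark}."
    return requested_offset, False, ""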
Example #3
def test_topic_creation_raises_for_wrong_config(
        topic_controller: TopicController,
        confluent_admin_client: confluent_kafka.admin.AdminClient,
        topic_id: str):
    topics = confluent_admin_client.list_topics(timeout=5).topics.keys()
    assert topic_id not in topics
    # the test cluster has only one broker, so a replication factor of 2 must fail
    with pytest.raises(KafkaException):
        topic_controller.create_topics([Topic(topic_id, replication_factor=2)])
Example #4
def test_topic_creation_works(
        topic_controller: TopicController,
        confluent_admin_client: confluent_kafka.admin.AdminClient,
        topic_id: str):
    topics = confluent_admin_client.list_topics(timeout=5).topics.keys()
    assert topic_id not in topics
    topic_controller.create_topics([Topic(topic_id, replication_factor=1)])

    topics = confluent_admin_client.list_topics(timeout=5).topics.keys()
    assert topic_id in topics
Example #5
def test_topic_diff(topic_controller: TopicController, topic_id: str):
    # values from cluster configs come back as strings;
    # testing against string values is important to ensure consistency
    default_delete_retention = "86400000"
    topic_conf = {
        "name": topic_id,
        "replication_factor": 1,
        "num_partitions": 50,
        "config": {
            "cleanup.policy": "compact"
        },
    }
    get_diff = topic_controller.diff_with_cluster

    # JSON round-trip produces a fresh deep copy, so each case mutates its own config
    conf = json.loads(json.dumps(topic_conf))
    topic = Topic.from_dict(conf)
    topic_controller.create_topics([topic])
    assert not get_diff(
        topic).has_changes, "Shouldn't have diff on just created topic"

    conf = json.loads(json.dumps(topic_conf))
    conf["config"]["cleanup.policy"] = "delete"
    topic = Topic.from_dict(conf)
    diff = TopicDiff().set_diff("cleanup.policy", "compact", "delete")
    assert get_diff(topic) == diff, "Should have a diff on cleanup.policy"

    conf = json.loads(json.dumps(topic_conf))
    conf["config"]["delete.retention.ms"] = 1500
    topic = Topic.from_dict(conf)
    diff = TopicDiff().set_diff("delete.retention.ms",
                                default_delete_retention, 1500)
    assert get_diff(topic) == diff, "Should have a diff on delete.retention.ms"

    # the same as before, but this time with string values
    conf = json.loads(json.dumps(topic_conf))
    conf["config"]["delete.retention.ms"] = "1500"
    topic = Topic.from_dict(conf)
    diff = TopicDiff().set_diff("delete.retention.ms",
                                default_delete_retention, "1500")
    assert get_diff(topic) == diff, "Should have a diff on delete.retention.ms"

    conf = json.loads(json.dumps(topic_conf))
    conf["num_partitions"] = 3
    topic = Topic.from_dict(conf)
    diff = TopicDiff().set_diff("num_partitions", 50, 3)
    assert get_diff(topic) == diff, "Should have a diff on num_partitions"

    conf = json.loads(json.dumps(topic_conf))
    conf["replication_factor"] = 3
    topic = Topic.from_dict(conf)
    diff = TopicDiff().set_diff("replication_factor", 1, 3)
    assert get_diff(topic) == diff, "Should have a diff on replication_factor"
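
The assertions above pin down the interface of TopicDiff: a chainable set_diff, a has_changes property, and value equality. A minimal stand-in capturing that contract (a sketch of the implied interface, not esque's actual implementation):

from typing import Any, Dict, Tuple

class MiniTopicDiff:
    # Sketch of the interface implied by the test above.
    def __init__(self) -> None:
        self._changes: Dict[str, Tuple[Any, Any]] = {}

    def set_diff(self, attribute: str, old: Any, new: Any) -> "MiniTopicDiff":
        self._changes[attribute] = (old, new)
        return self  # returning self allows TopicDiff().set_diff(...) chaining

    @property
    def has_changes(self) -> bool:
        return bool(self._changes)

    def __eq__(self, other: object) -> bool:
        return isinstance(other, MiniTopicDiff) and self._changes == other._changes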
Example #6
def test_edit_topic_works(
    interactive_cli_runner: CliRunner,
    monkeypatch: MonkeyPatch,
    topic_controller: TopicController,
    confluent_admin_client: confluent_kafka.admin.AdminClient,
    topic: str,
):

    topics = confluent_admin_client.list_topics(timeout=5).topics.keys()
    assert topic in topics

    config_dict = {
        "config": {
            "cleanup.policy": "delete",
            "compression.type": "producer",
            "delete.retention.ms": "123456789",
            "file.delete.delay.ms": "60000",
            "flush.messages": "123456789",
            "flush.ms": "9223372036854775807",
            "follower.replication.throttled.replicas": "",
            "index.interval.bytes": "4096",
            "leader.replication.throttled.replicas": "",
            "max.message.bytes": "1000012",
            "message.downconversion.enable": "true",
            "message.format.version": "2.2-IV1",
            "message.timestamp.difference.max.ms": "123456789",
            "message.timestamp.type": "CreateTime",
            "min.cleanable.dirty.ratio": "0.5",
            "min.compaction.lag.ms": "0",
            "min.insync.replicas": "1",
            "preallocate": "false",
            "retention.bytes": "-1",
            "retention.ms": "123456789",
            "segment.bytes": "123456789",
            "segment.index.bytes": "123456789",
            "segment.jitter.ms": "0",
            "segment.ms": "123456789",
            "unclean.leader.election.enable": "true",
        }
    }

    # stand-in for click.edit: return the prepared YAML instead of opening an editor
    def mock_edit_function(text=None,
                           editor=None,
                           env=None,
                           require_save=None,
                           extension=None,
                           filename=None):
        return yaml.dump(config_dict, default_flow_style=False)

    monkeypatch.setattr(click, "edit", mock_edit_function)
    result = interactive_cli_runner.invoke(edit_topic,
                                           topic,
                                           input="y\n",
                                           catch_exceptions=False)
    assert result.exit_code == 0

    topic_config_dict = topic_controller.get_cluster_topic(topic).as_dict(
        only_editable=True)
    for key, value in config_dict["config"].items():
        assert (key, topic_config_dict["config"][key]) == (key, value)
Example #7
def _set_offset_to_timestamp(self, offset_plans, offset_to_timestamp,
                             topic_name):
    timestamp_limit = pendulum.parse(offset_to_timestamp)
    proposed_offset_dict = TopicController(
        self.cluster).get_offsets_closest_to_timestamp(
            topic_name=topic_name, timestamp=timestamp_limit)
    for plan_element in offset_plans.values():
        plan_element.proposed_offset = proposed_offset_dict[
            plan_element.partition_id].offset
Example #8
def test_alter_topic_config_works(topic_controller: TopicController,
                                  topic_id: str):
    initial_topic = Topic(topic_id, config={"cleanup.policy": "delete"})

    topic_controller.create_topics([initial_topic])
    topic_controller.update_from_cluster(initial_topic)
    config = initial_topic.config
    assert config.get("cleanup.policy") == "delete"
    change_topic = Topic(topic_id, config={"cleanup.policy": "compact"})
    topic_controller.alter_configs([change_topic])
    topic_controller.update_from_cluster(change_topic)
    after_changes_applied_topic = topic_controller.get_cluster_topic(topic_id)

    final_config = after_changes_applied_topic.config
    assert final_config.get("cleanup.policy") == "compact"
Example #9
def test_correct_amount_of_messages(mocker,
                                    non_interactive_cli_runner: CliRunner,
                                    topic_controller: TopicController):
    # patch delete_topic away so the ping topic survives for inspection below
    delete_topic_mock = mocker.patch.object(TopicController, "delete_topic",
                                            mocker.Mock())

    result = non_interactive_cli_runner.invoke(ping, catch_exceptions=False)
    assert result.exit_code == 0
    assert delete_topic_mock.call_count == 1

    ping_topic = topic_controller.get_cluster_topic(config.PING_TOPIC)
    assert ping_topic.watermarks[0].high == 10
Example #10
def test_alter_topic_config_only_changes_mentioned_attributes(
        topic_controller: TopicController, topic_id: str):
    initial_topic = Topic(topic_id,
                          config={
                              "cleanup.policy": "delete",
                              "min.compaction.lag.ms": "1000000"
                          })

    topic_controller.create_topics([initial_topic])
    topic_controller.update_from_cluster(initial_topic)
    config = initial_topic.config
    assert config.get("cleanup.policy") == "delete"
    assert config.get("min.compaction.lag.ms") == "1000000"
    change_topic = Topic(topic_id, config={"cleanup.policy": "compact"})
    topic_controller.alter_configs([change_topic])
    topic_controller.update_from_cluster(change_topic)
    after_changes_applied_topic = topic_controller.get_cluster_topic(topic_id)

    final_config = after_changes_applied_topic.config
    assert final_config.get("cleanup.policy") == "compact"
    assert final_config.get("min.compaction.lag.ms") == "1000000"
Example #11
def topic_from_template(
    template_topic: str,
    partitions: Optional[int],
    replication_factor: Optional[int],
    topic_controller: TopicController,
    topic_name: str,
) -> Topic:
    template_config = topic_controller.get_cluster_topic(template_topic)

    if partitions is None:
        partitions = template_config.num_partitions

    if replication_factor is None:
        replication_factor = template_config.replication_factor

    config = template_config.config

    topic = Topic(topic_name, num_partitions=partitions, replication_factor=replication_factor, config=config)
    return topic
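
A hypothetical usage of the helper above (the topic names are made up; create_topics is the same call used in the tests):

new_topic = topic_from_template(
    template_topic="orders",           # assumed existing topic
    partitions=12,                     # override; pass None to inherit
    replication_factor=None,           # inherit from the template
    topic_controller=topic_controller,
    topic_name="orders-replay",
)
topic_controller.create_topics([new_topic])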
Example #12
def test_topic_object_works(topic_controller: TopicController, topic: str):
    cluster_topic = topic_controller.get_cluster_topic(topic)
    assert isinstance(cluster_topic, Topic)
    assert len(cluster_topic.watermarks) != 0
Example #13
def test_topic_listing_works(topic_controller: TopicController, topic: str):
    topics = topic_controller.list_topics()
    assert topic in [t.name for t in topics]
Example #14
def test_watermarks(filled_topic: Topic, topic_controller: TopicController):
    topic_controller.update_from_cluster(filled_topic)
    assert filled_topic.watermarks == {0: Watermark(10, 0)}
Example #15
def topic_controller(self) -> TopicController:
    if self.__topic_controller is None:
        self.__topic_controller = TopicController(self, self._config)
    return self.__topic_controller
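
A self-contained rendering of the same lazy-initialization pattern (the host class and the @property decorator are assumptions; the excerpt above shows only the method body):

class LazyCluster:
    # Hypothetical minimal host for the cached-controller pattern.
    def __init__(self, config) -> None:
        self._config = config
        self.__topic_controller = None  # created on first access, then reused

    @property
    def topic_controller(self) -> "TopicController":
        if self.__topic_controller is None:
            self.__topic_controller = TopicController(self, self._config)
        return self.__topic_controller

Repeated access returns the same instance, so every caller on a given cluster object shares one controller.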
Example #16
def test_apply(interactive_cli_runner: CliRunner,
               topic_controller: TopicController, topic_id: str):
    topic_name = f"apply_{topic_id}"
    topic_1 = {
        "name": topic_name + "_1",
        "replication_factor": 1,
        "num_partitions": 50,
        "config": {
            "cleanup.policy": "compact"
        },
    }
    topic_2 = {
        "name": topic_name + "_2",
        "replication_factor": 1,
        "num_partitions": 5,
        "config": {
            "cleanup.policy": "delete",
            "delete.retention.ms": 50000
        },
    }
    apply_conf = {"topics": [topic_1]}

    # 1: topic creation
    path = save_yaml(topic_id, apply_conf)
    result = interactive_cli_runner.invoke(esque,
                                           args=["apply", "-f", path],
                                           input="Y\n",
                                           catch_exceptions=False)
    assert (result.exit_code == 0 and "Successfully applied changes"
            in result.output), f"Calling apply failed, error: {result.output}"

    # 2: change cleanup policy to delete
    topic_1["config"]["cleanup.policy"] = "delete"
    path = save_yaml(topic_id, apply_conf)

    result = interactive_cli_runner.invoke(esque,
                                           args=["apply", "-f", path],
                                           input="Y\n",
                                           catch_exceptions=False)
    assert (result.exit_code == 0 and "Successfully applied changes"
            in result.output), f"Calling apply failed, error: {result.output}"

    # 3: add another topic and change the first one again
    apply_conf["topics"].append(topic_2)
    topic_1["config"]["cleanup.policy"] = "compact"
    path = save_yaml(topic_id, apply_conf)
    result = interactive_cli_runner.invoke(esque,
                                           args=["apply", "-f", path],
                                           input="Y\n",
                                           catch_exceptions=False)
    assert (result.exit_code == 0 and "Successfully applied changes"
            in result.output), f"Calling apply failed, error: {result.output}"

    # 4: no changes
    result = interactive_cli_runner.invoke(esque,
                                           args=["apply", "-f", path],
                                           catch_exceptions=False)
    assert (result.exit_code == 0 and "No changes detected, aborting"
            in result.output), f"Calling apply failed, error: {result.output}"

    # 5: change partitions - this attempt should be cancelled
    topic_1["num_partitions"] = 3
    topic_1["config"]["cleanup.policy"] = "delete"
    path = save_yaml(topic_id, apply_conf)
    result = interactive_cli_runner.invoke(esque,
                                           args=["apply", "-f", path],
                                           input="Y\n",
                                           catch_exceptions=False)
    assert (result.exit_code == 1
            and "to `replication_factor` and `num_partitions`"
            in result.output), f"Calling apply failed, error: {result.output}"
    # reset config to the old settings again
    topic_1["num_partitions"] = 50
    topic_1["config"]["cleanup.policy"] = "compact"

    # final: check results in the cluster to make sure they match
    for topic_conf in apply_conf["topics"]:
        topic_from_conf = Topic.from_dict(topic_conf)
        assert not topic_controller.diff_with_cluster(
            topic_from_conf
        ).has_changes, f"Topic configs don't match, diff is {topic_controller.diff_with_cluster(topic_from_conf)}"
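
save_yaml is used above but not shown; a plausible helper, under the assumption that it dumps the apply config to a temp file and returns the path (hypothetical, not the project's actual implementation):

import tempfile
from pathlib import Path

import yaml

def save_yaml_sketch(file_id: str, data: dict) -> str:
    # Hypothetical: write the apply config to a temp file and return its path.
    path = Path(tempfile.gettempdir()) / f"{file_id}.yaml"
    path.write_text(yaml.dump(data, default_flow_style=False))
    return str(path)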