def kafka_topic(kafka_server: dict):
    """Create the ephemeral test topic and return a small descriptor dict.

    NOTE(review): reads kafka_server["service"]["name"]; reads like a pytest
    fixture body, but no decorator is visible in this view -- confirm at file
    level.

    Returns:
        dict with keys "name" (topic name) and "service_name".
    """
    service = kafka_server["service"]["name"]
    test_utils.create_topic(config.EPHEMERAL_TOPIC_NAME, service)
    return {
        "name": config.EPHEMERAL_TOPIC_NAME,
        "service_name": service,
    }
def test_custom_zookeeper():
    """Repoint the service at a custom ZK path and verify brokers follow it."""
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    pod_prefix = '{}-'.format(config.DEFAULT_POD_TYPE)
    broker_ids = sdk_tasks.get_task_ids(foldered_name, pod_prefix)

    # create a topic against the default zk:
    test_utils.create_topic(config.DEFAULT_TOPIC_NAME, service_name=foldered_name)

    marathon_config = sdk_marathon.get_config(foldered_name)
    # should be using default path when this envvar is empty/unset:
    assert marathon_config['env']['KAFKA_ZOOKEEPER_URI'] == ''

    # use a custom zk path that's WITHIN the 'dcos-service-' path, so that
    # it's automatically cleaned up in uninstall:
    zk_path = 'master.mesos:2181/{}/CUSTOMPATH'.format(sdk_utils.get_zk_path(foldered_name))
    marathon_config['env']['KAFKA_ZOOKEEPER_URI'] = zk_path
    sdk_marathon.update_app(foldered_name, marathon_config)

    # brokers restart on the config change; wait for the rollout to finish:
    sdk_tasks.check_tasks_updated(foldered_name, pod_prefix, broker_ids)
    sdk_plan.wait_for_completed_deployment(foldered_name)

    # wait for brokers to finish registering
    test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=foldered_name)

    zookeeper = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'endpoints zookeeper')
    assert zookeeper.rstrip('\n') == zk_path

    # topic created earlier against default zk should no longer be present:
    topic_list_info = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, foldered_name, 'topic list', json=True)
    test_utils.assert_topic_lists_are_equal_without_automatic_topics([], topic_list_info)
def test_custom_zookeeper():
    """Point KAFKA_ZOOKEEPER_URI at a custom path and confirm the cluster moves."""
    service = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    original_brokers = sdk_tasks.get_task_ids(service, '{}-'.format(config.DEFAULT_POD_TYPE))

    # create a topic against the default zk:
    test_utils.create_topic(config.DEFAULT_TOPIC_NAME, service_name=service)

    app = sdk_marathon.get_config(service)
    # should be using default path when this envvar is empty/unset:
    assert app['env']['KAFKA_ZOOKEEPER_URI'] == ''

    # use a custom zk path that's WITHIN the 'dcos-service-' path, so that
    # it's automatically cleaned up in uninstall:
    custom_zk = 'master.mesos:2181/{}/CUSTOMPATH'.format(sdk_utils.get_zk_path(service))
    app['env']['KAFKA_ZOOKEEPER_URI'] = custom_zk
    sdk_marathon.update_app(service, app)

    # the env change rolls the broker pods; wait for the new tasks:
    sdk_tasks.check_tasks_updated(service, '{}-'.format(config.DEFAULT_POD_TYPE), original_brokers)
    sdk_plan.wait_for_completed_deployment(service)

    # wait for brokers to finish registering
    test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=service)

    endpoint = sdk_cmd.svc_cli(config.PACKAGE_NAME, service, 'endpoints zookeeper')
    assert endpoint.rstrip('\n') == custom_zk

    # topic created earlier against default zk should no longer be present:
    listed = sdk_cmd.svc_cli(config.PACKAGE_NAME, service, 'topic list', json=True)
    test_utils.assert_topic_lists_are_equal_without_automatic_topics([], listed)
def test_topic_offsets_increase_with_writes(kafka_server: dict):
    """Verify that producing messages to a topic advances its partition offsets.

    Creates a fresh topic, snapshots its initial offsets, writes a batch of
    messages, then polls offsets until they move past the initial snapshot.
    """
    package_name = kafka_server["package_name"]
    service_name = kafka_server["service"]["name"]

    def offset_is_valid(result) -> bool:
        initial = result[0]
        offsets = result[1]
        LOG.info("Checking validity with initial=%s offsets=%s", initial, offsets)
        has_elements = bool(topics.filter_empty_offsets(offsets, additional=initial))
        # A truthy return here makes @retrying re-invoke get_offset_change.
        return not has_elements

    @retrying.retry(stop_max_delay=5 * 60 * 1000,
                    wait_exponential_multiplier=1000,
                    wait_exponential_max=60 * 1000,
                    retry_on_result=offset_is_valid)
    def get_offset_change(topic_name, initial_offsets=None):
        """Run `dcos kafka topic offsets --time="-1"` until the output is not
        the initial output specified.

        initial_offsets defaults to None rather than [] to avoid the shared
        mutable-default-argument pitfall.
        """
        if initial_offsets is None:
            initial_offsets = []
        LOG.info("Getting offsets for %s", topic_name)
        offsets = sdk_cmd.svc_cli(
            package_name, service_name,
            'topic offsets --time="-1" {}'.format(topic_name), json=True)
        LOG.info("offsets=%s", offsets)
        return initial_offsets, offsets

    def sum_offsets(offset_info):
        # offset_info is a list of {partition index: offset} dicts; total the
        # integer representations of the offsets across all partitions.
        return sum(sum(int(v) for v in partition.values()) for partition in offset_info)

    topic_name = str(uuid.uuid4())
    LOG.info("Creating topic: %s", topic_name)
    test_utils.create_topic(topic_name, service_name)

    _, offset_info = get_offset_change(topic_name)
    initial_offset = sum_offsets(offset_info)
    LOG.info("Initial offset=%s", initial_offset)

    num_messages = 10
    LOG.info("Sending %s messages", num_messages)
    write_info = sdk_cmd.svc_cli(
        package_name, service_name,
        'topic producer_test {} {}'.format(topic_name, num_messages), json=True)
    assert len(write_info) == 1
    assert write_info['message'].startswith('Output: {} records sent'.format(num_messages))

    _, post_write_offset_info = get_offset_change(topic_name, offset_info)
    post_write_offset = sum_offsets(post_write_offset_info)
    LOG.info("Post-write offset=%s", post_write_offset)
    assert post_write_offset > initial_offset
def test_topic_offsets_increase_with_writes(kafka_server: dict):
    """Verify that producing messages to a topic advances its partition offsets.

    Creates a fresh topic, snapshots its initial offsets, writes a batch of
    messages, then polls offsets until they move past the initial snapshot.
    """
    package_name = kafka_server["package_name"]
    service_name = kafka_server["service"]["name"]

    def offset_is_valid(result) -> bool:
        initial = result[0]
        offsets = result[1]
        LOG.info("Checking validity with initial=%s offsets=%s", initial, offsets)
        has_elements = bool(topics.filter_empty_offsets(offsets, additional=initial))
        # A truthy return here makes @retrying re-invoke get_offset_change.
        return not has_elements

    @retrying.retry(stop_max_delay=5 * 60 * 1000,
                    wait_exponential_multiplier=1000,
                    wait_exponential_max=60 * 1000,
                    retry_on_result=offset_is_valid)
    def get_offset_change(topic_name, initial_offsets=None):
        """Run `dcos kafka topic offsets --time="-1"` until the output is not
        the initial output specified.

        initial_offsets defaults to None rather than [] to avoid the shared
        mutable-default-argument pitfall.
        """
        if initial_offsets is None:
            initial_offsets = []
        LOG.info("Getting offsets for %s", topic_name)
        offsets = sdk_cmd.svc_cli(
            package_name, service_name,
            'topic offsets --time="-1" {}'.format(topic_name), json=True)
        LOG.info("offsets=%s", offsets)
        return initial_offsets, offsets

    def sum_offsets(offset_info):
        # offset_info is a list of {partition index: offset} dicts; total the
        # integer representations of the offsets across all partitions.
        return sum(sum(int(v) for v in partition.values()) for partition in offset_info)

    topic_name = str(uuid.uuid4())
    LOG.info("Creating topic: %s", topic_name)
    test_utils.create_topic(topic_name, service_name)

    _, offset_info = get_offset_change(topic_name)
    initial_offset = sum_offsets(offset_info)
    LOG.info("Initial offset=%s", initial_offset)

    num_messages = 10
    LOG.info("Sending %s messages", num_messages)
    write_info = sdk_cmd.svc_cli(
        package_name, service_name,
        'topic producer_test {} {}'.format(topic_name, num_messages), json=True)
    assert len(write_info) == 1
    assert write_info['message'].startswith('Output: {} records sent'.format(num_messages))

    _, post_write_offset_info = get_offset_change(topic_name, offset_info)
    post_write_offset = sum_offsets(post_write_offset_info)
    LOG.info("Post-write offset=%s", post_write_offset)
    assert post_write_offset > initial_offset
def test_custom_zookeeper():
    """Move the service to a custom ZK path and verify brokers re-register there."""
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    pod_prefix = "{}-".format(config.DEFAULT_POD_TYPE)
    broker_ids = sdk_tasks.get_task_ids(foldered_name, pod_prefix)

    # create a topic against the default zk:
    test_utils.create_topic(config.DEFAULT_TOPIC_NAME, service_name=foldered_name)

    marathon_config = sdk_marathon.get_config(foldered_name)
    # should be using default path when this envvar is empty/unset:
    assert marathon_config["env"]["KAFKA_ZOOKEEPER_URI"] == ""

    # use a custom zk path that's WITHIN the 'dcos-service-' path, so that
    # it's automatically cleaned up in uninstall:
    zk_path = "master.mesos:2181/{}/CUSTOMPATH".format(sdk_utils.get_zk_path(foldered_name))
    marathon_config["env"]["KAFKA_ZOOKEEPER_URI"] = zk_path
    # NOTE(review): this SDK version's update_app takes only the config dict.
    sdk_marathon.update_app(marathon_config)

    sdk_tasks.check_tasks_updated(foldered_name, pod_prefix, broker_ids)
    sdk_plan.wait_for_completed_deployment(foldered_name)

    # wait for brokers to finish registering
    test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=foldered_name)

    zookeeper = sdk_networks.get_endpoint_string(config.PACKAGE_NAME, foldered_name, "zookeeper")
    assert zookeeper == zk_path

    # topic created earlier against default zk should no longer be present:
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, "topic list")
    assert rc == 0, "Topic list command failed"
    test_utils.assert_topic_lists_are_equal_without_automatic_topics([], json.loads(stdout))
def test_topic_create(kafka_server: dict):
    """Smoke test: creating the ephemeral topic on the running service succeeds."""
    service_name = kafka_server["service"]["name"]
    test_utils.create_topic(config.EPHEMERAL_TOPIC_NAME, service_name)
def test_topic_create_overlay():
    """Smoke test: topic creation succeeds on the overlay-networked service.

    NOTE(review): relies entirely on test_utils.create_topic's default
    arguments -- confirm this version of the helper defaults both the topic
    and service names.
    """
    test_utils.create_topic()
def test_topic_create_overlay():
    """Smoke test: the ephemeral topic can be created on the overlay-networked service."""
    topic = config.EPHEMERAL_TOPIC_NAME
    test_utils.create_topic(topic)
def test_topic_create_overlay():
    """Smoke test: topic creation on the overlay network.

    NOTE(review): called with no arguments; assumes test_utils.create_topic
    supplies default topic/service names in this version -- confirm.
    """
    test_utils.create_topic()
def test_topic_create():
    """Smoke test: topic creation succeeds against the foldered service.

    NOTE(review): only one positional argument is passed; assumes this
    version of test_utils.create_topic takes the service name first (or
    defaults the topic name) -- confirm against the helper's signature.
    """
    foldered = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    test_utils.create_topic(foldered)
def test_topic_create_overlay():
    """Smoke test: creating the ephemeral topic works over the overlay network."""
    test_utils.create_topic(config.EPHEMERAL_TOPIC_NAME)
def test_topic_create():
    """Smoke test: the ephemeral topic can be created on the foldered service."""
    service = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    test_utils.create_topic(config.EPHEMERAL_TOPIC_NAME, service)
def test_topic_create():
    """Smoke test: topic creation succeeds on the foldered service.

    NOTE(review): only FOLDERED_SERVICE_NAME is passed; assumes this version
    of test_utils.create_topic defaults the topic name -- confirm.
    """
    test_utils.create_topic(FOLDERED_SERVICE_NAME)
def test_topic_create(kafka_server: dict):
    """Smoke test: the ephemeral topic can be created on the provided service."""
    svc = kafka_server["service"]["name"]
    test_utils.create_topic(config.EPHEMERAL_TOPIC_NAME, svc)
def test_topic_create():
    """Smoke test: topic creation against the foldered service name.

    NOTE(review): single positional argument; assumes test_utils.create_topic
    defaults the topic name in this version of the helper -- confirm.
    """
    test_utils.create_topic(FOLDERED_SERVICE_NAME)