Example #1
def test_endpoints():
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    # check that we can reach the scheduler via admin router, and that returned endpoints are sanitized:
    core_site = etree.fromstring(sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'endpoints core-site.xml'))
    check_properties(core_site, {
        'ha.zookeeper.parent-znode': '/{}/hadoop-ha'.format(sdk_utils.get_zk_path(
            foldered_name))
    })

    hdfs_site = etree.fromstring(sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'endpoints hdfs-site.xml'))
    expect = {
        'dfs.namenode.shared.edits.dir': 'qjournal://{}/hdfs'.format(';'.join([
            sdk_hosts.autoip_host(
                foldered_name,
                'journal-{}-node'.format(i),
                8485
            ) for i in range(3)])),
    }
    for i in range(2):
        name_node = 'name-{}-node'.format(i)
        expect['dfs.namenode.rpc-address.hdfs.{}'.format(name_node)] = sdk_hosts.autoip_host(
            foldered_name, name_node, 9001)
        expect['dfs.namenode.http-address.hdfs.{}'.format(name_node)] = sdk_hosts.autoip_host(
            foldered_name, name_node, 9002)
    check_properties(hdfs_site, expect)


def test_custom_zookeeper(kafka_client: client.KafkaClient):
    broker_ids = sdk_tasks.get_task_ids(FOLDERED_NAME,
                                        "{}-".format(config.DEFAULT_POD_TYPE))

    # create a topic against the default zk:
    kafka_client.create_topic(config.DEFAULT_TOPIC_NAME)

    marathon_config = sdk_marathon.get_config(FOLDERED_NAME)
    # should be using default path when this envvar is empty/unset:
    assert marathon_config["env"]["KAFKA_ZOOKEEPER_URI"] == ""

    # use a custom zk path that's WITHIN the 'dcos-service-' path, so that it's automatically cleaned up in uninstall:
    zk_path = "master.mesos:2181/{}/CUSTOMPATH".format(
        sdk_utils.get_zk_path(FOLDERED_NAME))
    marathon_config["env"]["KAFKA_ZOOKEEPER_URI"] = zk_path
    sdk_marathon.update_app(marathon_config)

    sdk_tasks.check_tasks_updated(FOLDERED_NAME,
                                  "{}-".format(config.DEFAULT_POD_TYPE),
                                  broker_ids)
    sdk_plan.wait_for_completed_deployment(FOLDERED_NAME)

    # wait for brokers to finish registering
    kafka_client.check_broker_count(config.DEFAULT_BROKER_COUNT)

    zookeeper = sdk_networks.get_endpoint_string(config.PACKAGE_NAME,
                                                 FOLDERED_NAME, "zookeeper")
    assert zookeeper == zk_path

    # topic created earlier against default zk should no longer be present:
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, FOLDERED_NAME,
                                    "topic list")
    assert rc == 0, "Topic list command failed"

    assert config.DEFAULT_TOPIC_NAME not in json.loads(stdout)
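The check_properties helper called in test_endpoints above is not shown in any of these examples. The following is a minimal sketch of what it might look like, assuming the endpoints return Hadoop-style <configuration><property> XML; it is an illustration, not the repo's actual helper.

import xml.etree.ElementTree as etree  # assumption: any ElementTree-compatible etree works here


def check_properties(root, expect):
    # Collect <property><name>/<value> pairs from the parsed configuration document.
    found = {prop.findtext("name"): prop.findtext("value") for prop in root.findall("property")}
    # Every expected property must be present with exactly the expected value.
    for name, value in expect.items():
        assert found.get(name) == value, "{}: expected {}, got {}".format(name, value, found.get(name))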
Example #3
def test_endpoints():
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    # check that we can reach the scheduler via admin router, and that returned endpoints are sanitized:
    core_site = etree.fromstring(
        sdk_networks.get_endpoint_string(config.PACKAGE_NAME, foldered_name, "core-site.xml")
    )
    check_properties(
        core_site,
        {"ha.zookeeper.parent-znode": "/{}/hadoop-ha".format(sdk_utils.get_zk_path(foldered_name))},
    )

    hdfs_site = etree.fromstring(
        sdk_networks.get_endpoint_string(config.PACKAGE_NAME, foldered_name, "hdfs-site.xml")
    )
    expect = {
        "dfs.namenode.shared.edits.dir": "qjournal://{}/hdfs".format(
            ";".join(
                [
                    sdk_hosts.autoip_host(foldered_name, "journal-{}-node".format(i), 8485)
                    for i in range(3)
                ]
            )
        )
    }
    for i in range(2):
        name_node = "name-{}-node".format(i)
        expect["dfs.namenode.rpc-address.hdfs.{}".format(name_node)] = sdk_hosts.autoip_host(
            foldered_name, name_node, 9001
        )
        expect["dfs.namenode.http-address.hdfs.{}".format(name_node)] = sdk_hosts.autoip_host(
            foldered_name, name_node, 9002
        )
    check_properties(hdfs_site, expect)


def test_custom_zookeeper():
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    broker_ids = sdk_tasks.get_task_ids(foldered_name, '{}-'.format(config.DEFAULT_POD_TYPE))

    # create a topic against the default zk:
    test_utils.create_topic(config.DEFAULT_TOPIC_NAME, service_name=foldered_name)

    marathon_config = sdk_marathon.get_config(foldered_name)
    # should be using default path when this envvar is empty/unset:
    assert marathon_config['env']['KAFKA_ZOOKEEPER_URI'] == ''

    # use a custom zk path that's WITHIN the 'dcos-service-' path, so that it's automatically cleaned up in uninstall:
    zk_path = 'master.mesos:2181/{}/CUSTOMPATH'.format(sdk_utils.get_zk_path(foldered_name))
    marathon_config['env']['KAFKA_ZOOKEEPER_URI'] = zk_path
    sdk_marathon.update_app(foldered_name, marathon_config)

    sdk_tasks.check_tasks_updated(foldered_name, '{}-'.format(config.DEFAULT_POD_TYPE), broker_ids)
    sdk_plan.wait_for_completed_deployment(foldered_name)

    # wait for brokers to finish registering
    test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=foldered_name)

    zookeeper = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'endpoints zookeeper')
    assert zookeeper.rstrip('\n') == zk_path

    # topic created earlier against default zk should no longer be present:
    topic_list_info = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'topic list', json=True)

    test_utils.assert_topic_lists_are_equal_without_automatic_topics([], topic_list_info)
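test_utils.assert_topic_lists_are_equal_without_automatic_topics is used above and in Example #16 but never defined in these excerpts. A plausible sketch, assuming "automatic" topics are Kafka-internal names such as __consumer_offsets that brokers may create on their own:

def assert_topic_lists_are_equal_without_automatic_topics(expected, actual):
    # Hypothetical sketch: drop Kafka-internal topics (conventionally prefixed with
    # "__", e.g. __consumer_offsets) from both sides before comparing.
    def without_automatic(topics):
        return sorted(t for t in topics if not t.startswith("__"))

    assert without_automatic(expected) == without_automatic(actual)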
Example #7
def test_overlay_network_deployment_and_endpoints():
    endpoint_names = sdk_networks.get_endpoint_names(config.PACKAGE_NAME, config.SERVICE_NAME)
    assert set(["broker", "zookeeper"]) == set(endpoint_names)

    sdk_networks.check_endpoint_on_overlay(config.PACKAGE_NAME, config.SERVICE_NAME, "broker", config.DEFAULT_BROKER_COUNT)

    zookeeper = sdk_networks.get_endpoint_string(
        config.PACKAGE_NAME, config.SERVICE_NAME, "zookeeper"
    )
    assert zookeeper == "master.mesos:2181/{}".format(sdk_utils.get_zk_path(config.SERVICE_NAME))
Example #8
def _retried_run_janitor(service_name):
    auth_token = sdk_cmd.run_cli('config show core.dcos_acs_token', print_output=False).strip()

    cmd_list = ["docker", "run", "mesosphere/janitor", "/janitor.py",
                "-r", sdk_utils.get_role(service_name),
                "-p", service_name + '-principal',
                "-z", sdk_utils.get_zk_path(service_name),
                "--auth_token={}".format(auth_token)]

    sdk_cmd.master_ssh(" ".join(cmd_list))
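Despite its name, _retried_run_janitor carries no retry logic in this excerpt; the retry presumably lives in a decorator that was stripped. A sketch of how it could be wrapped, reusing the retrying decorator that test_lock applies below; the wait and stop values are assumptions:

import retrying


@retrying.retry(wait_fixed=5000, stop_max_delay=5 * 60 * 1000)  # assumed retry policy
def _retried_run_janitor(service_name):
    # Body as in the example above; an assert/exception on failure makes the
    # decorator schedule another attempt.
    ...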
Example #9
def test_overlay_network_deployment_and_endpoints():
    # double check
    sdk_tasks.check_running(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
    endpoints = sdk_networks.get_and_test_endpoints(config.PACKAGE_NAME, config.SERVICE_NAME, "", 2)
    assert "broker" in endpoints, "broker is missing from endpoints {}".format(endpoints)
    assert "zookeeper" in endpoints, "zookeeper missing from endpoints {}".format(endpoints)
    broker_endpoints = sdk_networks.get_and_test_endpoints(config.PACKAGE_NAME, config.SERVICE_NAME, "broker", 3)
    sdk_networks.check_endpoints_on_overlay(broker_endpoints)

    zookeeper = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'endpoints zookeeper')
    assert zookeeper.rstrip() == 'master.mesos:2181/{}'.format(sdk_utils.get_zk_path(config.SERVICE_NAME))
Example #10
def test_lock():
    """This test verifies that a second scheduler fails to startup when
    an existing scheduler is running.  Without locking, the scheduler
    would fail during registration, but after writing its config to ZK.
    So in order to verify that the scheduler fails immediately, we ensure
    that the ZK config state is unmodified."""

    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)

    def get_zk_node_data(node_name):
        return sdk_cmd.cluster_request(
            "GET", "/exhibitor/exhibitor/v1/explorer/node-data?key={}".format(
                node_name)).json()

    # Get ZK state from running framework
    zk_path = "{}/ConfigTarget".format(sdk_utils.get_zk_path(foldered_name))
    zk_config_old = get_zk_node_data(zk_path)

    # Get marathon app
    marathon_config = sdk_marathon.get_config(foldered_name)
    old_timestamp = marathon_config.get("lastTaskFailure",
                                        {}).get("timestamp", None)

    # Scale to 2 instances
    labels = marathon_config["labels"]
    original_labels = labels.copy()
    labels.pop("MARATHON_SINGLE_INSTANCE_APP")
    sdk_marathon.update_app(marathon_config)
    marathon_config["instances"] = 2
    sdk_marathon.update_app(marathon_config,
                            wait_for_completed_deployment=False)

    @retrying.retry(wait_fixed=1000,
                    stop_max_delay=120 * 1000,
                    retry_on_result=lambda res: not res)
    def wait_for_second_scheduler_to_fail():
        timestamp = (sdk_marathon.get_config(foldered_name).get(
            "lastTaskFailure", {}).get("timestamp", None))
        return timestamp != old_timestamp

    wait_for_second_scheduler_to_fail()

    # Verify ZK is unchanged
    zk_config_new = get_zk_node_data(zk_path)
    assert zk_config_old == zk_config_new

    # In order to prevent the second scheduler instance from obtaining a lock, we undo the "scale-up" operation
    marathon_config["instances"] = 1
    marathon_config["labels"] = original_labels
    sdk_marathon.update_app(marathon_config, force=True)


def _retried_run_janitor(service_name):
    cmd_list = [
        "docker",
        "run",
        "mesosphere/janitor",
        "/janitor.py",
        "-r",
        sdk_utils.get_role(service_name),
        "-p",
        service_name + "-principal",
        "-z",
        sdk_utils.get_zk_path(service_name),
        "--auth_token={}".format(sdk_utils.dcos_token()),
    ]
    rc, _, _ = sdk_cmd.master_ssh(" ".join(cmd_list))
    assert rc == 0, "Janitor command failed"
Example #12
def test_overlay_network_deployment_and_endpoints():
    # double check
    sdk_tasks.check_running(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
    endpoints = sdk_networks.get_and_test_endpoints(config.PACKAGE_NAME,
                                                    config.SERVICE_NAME, "", 2)
    assert "broker" in endpoints, "broker is missing from endpoints {}".format(
        endpoints)
    assert "zookeeper" in endpoints, "zookeeper missing from endpoints {}".format(
        endpoints)
    broker_endpoints = sdk_networks.get_and_test_endpoints(
        config.PACKAGE_NAME, config.SERVICE_NAME, "broker", 3)
    sdk_networks.check_endpoints_on_overlay(broker_endpoints)

    zookeeper = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME,
                                'endpoints zookeeper')
    assert zookeeper.rstrip() == 'master.mesos:2181/{}'.format(
        sdk_utils.get_zk_path(config.SERVICE_NAME))
Example #13
def test_lock():
    """This test verifies that a second scheduler fails to startup when
    an existing scheduler is running.  Without locking, the scheduler
    would fail during registration, but after writing its config to ZK.
    So in order to verify that the scheduler fails immediately, we ensure
    that the ZK config state is unmodified."""

    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)

    def get_zk_node_data(node_name):
        return sdk_cmd.cluster_request(
            "GET", "/exhibitor/exhibitor/v1/explorer/node-data?key={}".format(node_name)
        ).json()

    # Get ZK state from running framework
    zk_path = "{}/ConfigTarget".format(sdk_utils.get_zk_path(foldered_name))
    zk_config_old = get_zk_node_data(zk_path)

    # Get marathon app
    marathon_config = sdk_marathon.get_config(foldered_name)
    old_timestamp = marathon_config.get("lastTaskFailure", {}).get("timestamp", None)

    # Scale to 2 instances
    labels = marathon_config["labels"]
    original_labels = labels.copy()
    labels.pop("MARATHON_SINGLE_INSTANCE_APP")
    sdk_marathon.update_app(marathon_config)
    marathon_config["instances"] = 2
    sdk_marathon.update_app(marathon_config, wait_for_completed_deployment=False)

    @retrying.retry(wait_fixed=1000, stop_max_delay=120 * 1000, retry_on_result=lambda res: not res)
    def wait_for_second_scheduler_to_fail():
        timestamp = (
            sdk_marathon.get_config(foldered_name).get("lastTaskFailure", {}).get("timestamp", None)
        )
        return timestamp != old_timestamp

    wait_for_second_scheduler_to_fail()

    # Verify ZK is unchanged
    zk_config_new = get_zk_node_data(zk_path)
    assert zk_config_old == zk_config_new

    # In order to prevent the second scheduler instance from obtaining a lock, we undo the "scale-up" operation
    marathon_config["instances"] = 1
    marathon_config["labels"] = original_labels
    sdk_marathon.update_app(marathon_config, force=True)
Example #14
def run_janitor(service_name, role, service_account, znode):
    if role is None:
        role = sdk_utils.get_deslashed_service_name(service_name) + '-role'
    if service_account is None:
        service_account = service_name + '-principal'
    if znode is None:
        znode = sdk_utils.get_zk_path(service_name)

    auth_token = sdk_cmd.run_cli('config show core.dcos_acs_token',
                                 print_output=False).strip()

    cmd_list = [
        "docker", "run", "mesosphere/janitor", "/janitor.py", "-r", role, "-p",
        service_account, "-z", znode, "--auth_token={}".format(auth_token)
    ]
    cmd = " ".join(cmd_list)

    sdk_cmd.master_ssh(cmd)
Example #15
def run_janitor(service_name, role, service_account, znode):
    if role is None:
        role = sdk_utils.get_deslashed_service_name(service_name) + '-role'
    if service_account is None:
        service_account = service_name + '-principal'
    if znode is None:
        znode = sdk_utils.get_zk_path(service_name)

    auth_token = sdk_cmd.run_cli('config show core.dcos_acs_token', print_output=False).strip()

    cmd_list = ["docker", "run", "mesosphere/janitor", "/janitor.py",
                "-r", role,
                "-p", service_account,
                "-z", znode,
                "--auth_token={}".format(auth_token)]
    cmd = " ".join(cmd_list)

    sdk_cmd.master_ssh(cmd)
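A typical call passes None for the optional arguments so that the role, principal, and znode all derive from the service name; the service name below is illustrative:

# With service_name="hdfs" the defaults become role="hdfs-role",
# service_account="hdfs-principal", znode=sdk_utils.get_zk_path("hdfs").
run_janitor("hdfs", role=None, service_account=None, znode=None)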
Example #16
def test_custom_zookeeper():
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    broker_ids = sdk_tasks.get_task_ids(foldered_name,
                                        "{}-".format(config.DEFAULT_POD_TYPE))

    # create a topic against the default zk:
    test_utils.create_topic(config.DEFAULT_TOPIC_NAME,
                            service_name=foldered_name)

    marathon_config = sdk_marathon.get_config(foldered_name)
    # should be using default path when this envvar is empty/unset:
    assert marathon_config["env"]["KAFKA_ZOOKEEPER_URI"] == ""

    # use a custom zk path that's WITHIN the 'dcos-service-' path, so that it's automatically cleaned up in uninstall:
    zk_path = "master.mesos:2181/{}/CUSTOMPATH".format(
        sdk_utils.get_zk_path(foldered_name))
    marathon_config["env"]["KAFKA_ZOOKEEPER_URI"] = zk_path
    sdk_marathon.update_app(marathon_config)

    sdk_tasks.check_tasks_updated(foldered_name,
                                  "{}-".format(config.DEFAULT_POD_TYPE),
                                  broker_ids)
    sdk_plan.wait_for_completed_deployment(foldered_name)

    # wait for brokers to finish registering
    test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT,
                                  service_name=foldered_name)

    zookeeper = sdk_networks.get_endpoint_string(config.PACKAGE_NAME,
                                                 foldered_name, "zookeeper")
    assert zookeeper == zk_path

    # topic created earlier against default zk should no longer be present:
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name,
                                    "topic list")
    assert rc == 0, "Topic list command failed"

    test_utils.assert_topic_lists_are_equal_without_automatic_topics(
        [], json.loads(stdout))
Example #17
def test_endpoints_zookeeper_default():
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    zookeeper = sdk_networks.get_endpoint_string(config.PACKAGE_NAME,
                                                 foldered_name, "zookeeper")
    assert zookeeper == "master.mesos:2181/{}".format(
        sdk_utils.get_zk_path(foldered_name))


def test_endpoints_zookeeper_default():
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    _, zookeeper, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name,
                                      "endpoints zookeeper")
    assert zookeeper.rstrip("\n") == "master.mesos:2181/{}".format(
        sdk_utils.get_zk_path(foldered_name))
Example #19
def test_endpoints_zookeeper_default():
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    zookeeper = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'endpoints zookeeper')
    assert zookeeper.rstrip('\n') == 'master.mesos:2181/{}'.format(sdk_utils.get_zk_path(foldered_name))
Example #20
def test_endpoints_zookeeper_default():
    zookeeper = sdk_networks.get_endpoint_string(config.PACKAGE_NAME,
                                                 FOLDERED_NAME, "zookeeper")
    assert zookeeper == "master.mesos:2181/{}".format(
        sdk_utils.get_zk_path(FOLDERED_NAME))
Example #22
import shakedown

import sdk_plan
import sdk_utils
import sdk_tasks

PACKAGE_NAME = 'hdfs'
FOLDERED_SERVICE_NAME = sdk_utils.get_foldered_name(PACKAGE_NAME)
DEFAULT_TASK_COUNT = 10  # 3 data nodes, 3 journal nodes, 2 name nodes, 2 zkfc nodes

ZK_SERVICE_PATH = sdk_utils.get_zk_path(PACKAGE_NAME)
TEST_CONTENT_SMALL = "This is some test data"
# use long-read alignments to human chromosome 1 as large file input (11GB)
TEST_CONTENT_LARGE_SOURCE = "http://s3.amazonaws.com/nanopore-human-wgs/chr1.sorted.bam"
TEST_FILE_1_NAME = "test_1"
TEST_FILE_2_NAME = "test_2"
DEFAULT_HDFS_TIMEOUT = 5 * 60
HDFS_POD_TYPES = {"journal", "name", "data"}


def write_data_to_hdfs(svc_name, filename, content_to_write=TEST_CONTENT_SMALL):
    write_command = "echo '{}' | ./bin/hdfs dfs -put - /{}".format(content_to_write, filename)
    rc, _ = run_hdfs_command(svc_name, write_command)
    # rc is a boolean success flag: True means the command exited with code 0
    return rc


def read_data_from_hdfs(svc_name, filename):
    read_command = "./bin/hdfs dfs -cat /{}".format(filename)
    rc, output = run_hdfs_command(svc_name, read_command)
    return rc and output.rstrip() == TEST_CONTENT_SMALL
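Both helpers above delegate to run_hdfs_command, which is not included in this excerpt; going by the comment in write_data_to_hdfs, it returns a success flag plus the command output. A rough sketch of that contract; the task-exec call and task name used here are assumptions:

def run_hdfs_command(svc_name, command):
    # Hypothetical: execute the shell command inside one of the service's tasks and
    # return (success, output). sdk_cmd.service_task_exec and "name-0-node" are assumptions.
    rc, output, _ = sdk_cmd.service_task_exec(svc_name, "name-0-node", command)
    return rc == 0, output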
Example #23
import pytest
import sdk_cmd
import sdk_hosts
import sdk_install as install
import sdk_marathon
import sdk_metrics
import sdk_plan
import sdk_tasks
import sdk_upgrade
import sdk_utils
import shakedown
from tests import config, test_utils

EPHEMERAL_TOPIC_NAME = 'topic_2'
FOLDERED_SERVICE_NAME = sdk_utils.get_foldered_name(config.SERVICE_NAME)
ZK_SERVICE_PATH = sdk_utils.get_zk_path(FOLDERED_SERVICE_NAME)


@pytest.fixture(scope='module', autouse=True)
def configure_package(configure_security):
    try:
        install.uninstall(config.PACKAGE_NAME, FOLDERED_SERVICE_NAME)

        if shakedown.dcos_version_less_than("1.9"):
            # Last beta-kafka release (1.1.25-0.10.1.0-beta) excludes 1.8. Skip upgrade tests with 1.8 and just install
            install.install(
                config.PACKAGE_NAME,
                FOLDERED_SERVICE_NAME,
                config.DEFAULT_BROKER_COUNT,
                additional_options={"service": {"name": FOLDERED_SERVICE_NAME}})
        else: