Example #1
def test_custom_domain():
    task_count = 3
    custom_domain = sdk_hosts.get_crypto_id_domain()
    sdk_install.install(config.PACKAGE_NAME,
                        config.SERVICE_NAME,
                        task_count,
                        additional_options={
                            "service": {
                                "security": {
                                    "custom_domain": custom_domain
                                }
                            }
                        })

    # Verify the endpoint entry is correct
    assert set(["native-client"]) == set(
        sdk_networks.get_endpoint_names(config.PACKAGE_NAME,
                                        config.SERVICE_NAME))
    test_endpoint = sdk_networks.get_endpoint(config.PACKAGE_NAME,
                                              config.SERVICE_NAME,
                                              "native-client")
    assert set(["address", "dns"]) == set(test_endpoint.keys())

    assert len(test_endpoint["address"]) == task_count
    # Expect ip:port:
    for entry in test_endpoint["address"]:
        assert len(entry.split(":")) == 2

    assert len(test_endpoint["dns"]) == task_count
    # Expect custom domain:
    for entry in test_endpoint["dns"]:
        assert custom_domain in entry
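The assertions in this example imply the shape of the dict returned by sdk_networks.get_endpoint when no VIP is configured: an "address" list of ip:port strings and a "dns" list of hostnames, one entry per task. Below is a minimal, self-contained sketch of those checks run against hypothetical sample data; the helper name and the sample hosts are illustrative assumptions, not part of the SDK.

from typing import Dict, List


def check_endpoint_shape(endpoint: Dict[str, List[str]],
                         task_count: int,
                         custom_domain: str) -> None:
    # Only "address" and "dns" are expected when no VIP is configured.
    assert set(endpoint.keys()) == {"address", "dns"}

    # "address" entries are plain ip:port pairs, one per task.
    assert len(endpoint["address"]) == task_count
    for entry in endpoint["address"]:
        ip, port = entry.split(":")
        assert ip and port.isdigit()

    # "dns" entries carry the configured custom domain, one per task.
    assert len(endpoint["dns"]) == task_count
    for entry in endpoint["dns"]:
        assert custom_domain in entry


# Hypothetical data illustrating the expected structure:
check_endpoint_shape(
    endpoint={
        "address": ["10.0.1.10:9042", "10.0.1.11:9042", "10.0.1.12:9042"],
        "dns": ["node-0-server.mysvc.example.directory:9042",
                "node-1-server.mysvc.example.directory:9042",
                "node-2-server.mysvc.example.directory:9042"],
    },
    task_count=3,
    custom_domain="example.directory",
)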
Example #2
def setup_passwords(service_name: str = SERVICE_NAME,
                    task_name: str = "master-0-node",
                    https: bool = False) -> Union[bool, Dict[str, str]]:
    if https:
        master_0_node_dns = sdk_networks.get_endpoint(PACKAGE_NAME,
                                                      service_name,
                                                      "master-http")["dns"][0]
        url = "--url https://{}".format(master_0_node_dns)
    else:
        url = ""

    cmd = "\n".join([
        "set -x",
        "export JAVA_HOME=$(ls -d ${MESOS_SANDBOX}/jdk*/)",
        "ELASTICSEARCH_PATH=$(ls -d ${MESOS_SANDBOX}/elasticsearch-*/)",
        "${{ELASTICSEARCH_PATH}}/bin/elasticsearch-setup-passwords auto --batch --verbose {}"
        .format(url),
    ])

    full_cmd = "bash -c '{}'".format(cmd)
    _, stdout, _ = sdk_cmd.service_task_exec(service_name, task_name, full_cmd)

    elastic_password_search = re.search("PASSWORD elastic = (.*)", stdout)
    assert isinstance(elastic_password_search, Match)
    elastic_password = elastic_password_search.group(1)

    kibana_password_search = re.search("PASSWORD kibana = (.*)", stdout)
    assert isinstance(kibana_password_search, Match)
    kibana_password = kibana_password_search.group(1)

    if not elastic_password or not kibana_password:
        # Retry.
        return False

    return {"elastic": elastic_password, "kibana": kibana_password}
Example #3
def test_custom_service_tld():
    task_count = 1
    custom_tld = sdk_hosts.get_crypto_id_domain()
    sdk_install.install(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        task_count,
        additional_options={
            "service": {
                "custom_service_tld": custom_tld,
                "yaml": "custom_tld"
            }
        },
    )

    # Verify the endpoint entry is correct
    assert set(["test"]) == set(
        sdk_networks.get_endpoint_names(config.PACKAGE_NAME,
                                        config.SERVICE_NAME))
    test_endpoint = sdk_networks.get_endpoint(config.PACKAGE_NAME,
                                              config.SERVICE_NAME, "test")
    assert set(["address", "dns"]) == set(test_endpoint.keys())

    assert len(test_endpoint["address"]) == task_count
    # Expect ip:port:
    for entry in test_endpoint["address"]:
        assert len(entry.split(":")) == 2

    assert len(test_endpoint["dns"]) == task_count
    # Expect custom tld:
    for entry in test_endpoint["dns"]:
        assert custom_tld in entry
Example #4
def kafka_server(zookeeper_service):
    try:

        # Get the zookeeper DNS values
        zookeeper_dns = sdk_networks.get_endpoint(
            zookeeper_service["package_name"],
            zookeeper_service["service"]["name"], "clientport")["dns"]

        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

        config.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options={
                "kafka": {
                    "kafka_zookeeper_uri": ",".join(zookeeper_dns)
                }
            },
        )

        # wait for brokers to finish registering before starting tests
        test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT,
                                      service_name=config.SERVICE_NAME)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
Example #5
def _configure_kafka_cluster(
        kafka_client: client.KafkaClient, zookeeper_service: typing.Dict,
        allow_access_if_no_acl: bool) -> client.KafkaClient:
    zookeeper_dns = sdk_networks.get_endpoint(
        zookeeper_service["package_name"],
        zookeeper_service["service"]["name"], "clientport")["dns"]

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    service_options = _get_service_options(allow_access_if_no_acl,
                                           kafka_client.kerberos,
                                           zookeeper_dns)

    config.install(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        config.DEFAULT_BROKER_COUNT,
        additional_options=service_options,
    )

    kafka_server = {**service_options, **{"package_name": config.PACKAGE_NAME}}

    sdk_cmd.svc_cli(
        kafka_server["package_name"],
        kafka_server["service"]["name"],
        "topic create {}".format(TOPIC_NAME),
    )

    kafka_client.connect()

    # Clear the ACLs
    kafka_client.remove_acls("authorized", TOPIC_NAME)
    return kafka_client
Example #6
def test_endpoints():
    # check that we can reach the scheduler via admin router, and that returned endpoints are sanitized:
    endpoints = sdk_networks.get_endpoint(config.PACKAGE_NAME,
                                          config.get_foldered_service_name(),
                                          "native-client")
    assert endpoints["dns"][0] == sdk_hosts.autoip_host(
        config.get_foldered_service_name(), "node-0-server", 9042)
    assert "vip" not in endpoints
Example #7
def test_endpoints() -> None:
    # Check that we can reach the scheduler via admin router, and that returned endpoints are
    # sanitized.
    for endpoint in config.ENDPOINT_TYPES:
        endpoints = sdk_networks.get_endpoint(package_name, service_name, endpoint)
        host = endpoint.split("-")[0]  # 'coordinator-http' => 'coordinator'
        assert endpoints["dns"][0].startswith(sdk_hosts.autoip_host(service_name, host + "-0-node"))
        assert endpoints["vip"].startswith(sdk_hosts.vip_host(service_name, host))

    sdk_plan.wait_for_completed_deployment(service_name)
    sdk_plan.wait_for_completed_recovery(service_name)
Example #8
def test_tls_endpoints():
    endpoint_names = sdk_networks.get_endpoint_names(config.PACKAGE_NAME,
                                                     config.SERVICE_NAME)
    assert len(endpoint_names) == 2
    assert BROKER_TLS_ENDPOINT in endpoint_names

    # Test that broker-tls endpoint is available
    endpoint_tls = sdk_networks.get_endpoint(config.PACKAGE_NAME,
                                             config.SERVICE_NAME,
                                             BROKER_TLS_ENDPOINT)
    assert len(endpoint_tls["dns"]) == config.DEFAULT_BROKER_COUNT
Example #9
def test_endpoints():
    # check that we can reach the scheduler via admin router, and that returned endpoints are sanitized:
    for endpoint in config.ENDPOINT_TYPES:
        endpoints = sdk_networks.get_endpoint(config.PACKAGE_NAME,
                                              foldered_name, endpoint)
        host = endpoint.split("-")[0]  # 'coordinator-http' => 'coordinator'
        assert endpoints["dns"][0].startswith(
            sdk_hosts.autoip_host(foldered_name, host + "-0-node"))
        assert endpoints["vip"].startswith(
            sdk_hosts.vip_host(foldered_name, host))

    sdk_plan.wait_for_completed_deployment(foldered_name)
    sdk_plan.wait_for_completed_recovery(foldered_name)
Example #10
def test_endpoints_address():
    endpoints = sdk_networks.get_endpoint(config.PACKAGE_NAME, FOLDERED_NAME,
                                          "broker")

    # NOTE: do NOT assert an exact number of keys in the endpoint dict; it is open to extension.
    assert len(endpoints["address"]) == config.DEFAULT_BROKER_COUNT
    assert len(endpoints["dns"]) == config.DEFAULT_BROKER_COUNT
    for i in range(len(endpoints["dns"])):
        assert (sdk_hosts.autoip_host(FOLDERED_NAME,
                                      "kafka-{}-broker".format(i))
                in endpoints["dns"][i])
    assert endpoints["vip"] == sdk_hosts.vip_host(FOLDERED_NAME, "broker",
                                                  9092)
Example #11
def test_tls_ciphers(kafka_service):
    task_name = "kafka-0-broker"
    endpoint = sdk_networks.get_endpoint(config.PACKAGE_NAME,
                                         config.SERVICE_NAME,
                                         BROKER_TLS_ENDPOINT)["dns"][0]
    ciphers_config_path = [
        "service", "security", "transport_encryption", "ciphers"
    ]
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME,
                                    "describe")
    assert rc == 0, "Describe command failed"
    expected_ciphers = set(
        sdk_utils.get_in(
            ciphers_config_path,
            json.loads(stdout),
            "",
        ).rstrip().split(","))

    openssl_ciphers = sdk_security.openssl_ciphers()
    missing_openssl_ciphers = cipher_suites.missing_openssl_ciphers(
        openssl_ciphers)
    possible_openssl_ciphers = openssl_ciphers - missing_openssl_ciphers
    enabled_ciphers = set()

    assert openssl_ciphers, "OpenSSL ciphers should be non-empty"
    assert expected_ciphers, "Expected ciphers should be non-empty"
    assert possible_openssl_ciphers, "Possible OpenSSL ciphers should be non-empty"

    # Output OpenSSL version.
    sdk_cmd.service_task_exec(config.SERVICE_NAME, task_name,
                              "openssl version")
    log.warning("\n%s OpenSSL ciphers missing from the cipher_suites module:",
                len(missing_openssl_ciphers))
    log.warning("\n".join(to_sorted(list(missing_openssl_ciphers))))
    log.info("\n%s expected ciphers:", len(expected_ciphers))
    log.info("\n".join(to_sorted(list(expected_ciphers))))
    log.info("\n%s ciphers will be checked:", len(possible_openssl_ciphers))
    for openssl_cipher in to_sorted(list(possible_openssl_ciphers)):
        log.info("%s (%s)", cipher_suites.rfc_name(openssl_cipher),
                 openssl_cipher)

    for openssl_cipher in possible_openssl_ciphers:
        if sdk_security.is_cipher_enabled(config.SERVICE_NAME, task_name,
                                          openssl_cipher, endpoint):
            enabled_ciphers.add(cipher_suites.rfc_name(openssl_cipher))

    log.info("%s ciphers enabled out of %s:", len(enabled_ciphers),
             len(possible_openssl_ciphers))
    log.info("\n".join(to_sorted(list(enabled_ciphers))))

    assert expected_ciphers == enabled_ciphers, "Enabled ciphers should match expected ciphers"
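The expected cipher list above is read from the service's describe output by walking a nested key path with sdk_utils.get_in and splitting the comma-separated value. A minimal pure-Python sketch of that lookup, assuming get_in simply walks nested dicts and falls back to a default on a missing key (the sample JSON fragment is made up):

import json
from functools import reduce
from typing import Any, List


def get_in(path: List[str], data: Any, default: Any = None) -> Any:
    # Walk nested dicts by key path; return the default if any key is missing
    # (assumed behaviour of sdk_utils.get_in).
    try:
        return reduce(lambda acc, key: acc[key], path, data)
    except (KeyError, TypeError):
        return default


# Hypothetical fragment of the `describe` output:
describe_json = json.loads(
    '{"service": {"security": {"transport_encryption": {"ciphers": "TLS_A,TLS_B,TLS_C\\n"}}}}'
)
ciphers_path = ["service", "security", "transport_encryption", "ciphers"]
expected_ciphers = set(get_in(ciphers_path, describe_json, "").rstrip().split(","))
assert expected_ciphers == {"TLS_A", "TLS_B", "TLS_C"}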
Example #12
def wait_for_broker_dns(package_name: str, service_name: str):
    brokers = sdk_networks.get_endpoint(package_name, service_name, "broker")
    broker_dns = list(map(lambda x: x.split(":")[0], brokers["dns"]))

    def get_scheduler_task_id(service_name: str) -> str:
        for task in sdk_tasks.get_summary():
            if task.name == service_name:
                return task.id

    scheduler_task_id = get_scheduler_task_id(service_name)
    log.info("Scheduler task ID: %s", scheduler_task_id)
    log.info("Waiting for brokers: %s", broker_dns)

    assert sdk_cmd.resolve_hosts(scheduler_task_id, broker_dns)
Example #13
def kafka_client(kerberos, kafka_server):

    brokers = sdk_networks.get_endpoint(kafka_server["package_name"],
                                        kafka_server["service"]["name"],
                                        "broker")["dns"]

    try:
        client_id = "kafka-client"
        client = {
            "id": client_id,
            "mem": 512,
            "container": {
                "type":
                "MESOS",
                "docker": {
                    "image": "elezar/kafka-client:4b9c060",
                    "forcePullImage": True
                },
                "volumes": [{
                    "containerPath": "/tmp/kafkaconfig/kafka-client.keytab",
                    "secret": "kafka_keytab",
                }],
            },
            "secrets": {
                "kafka_keytab": {
                    "source": kerberos.get_keytab_path()
                }
            },
            "networks": [{
                "mode": "host"
            }],
            "env": {
                "JVM_MaxHeapSize": "512",
                "KAFKA_CLIENT_MODE": "test",
                "KAFKA_TOPIC": "securetest",
                "KAFKA_BROKER_LIST": ",".join(brokers),
            },
        }

        sdk_marathon.install_app(client)
        yield {
            **client,
            **{
                "brokers": list(map(lambda x: x.split(":")[0], brokers))
            }
        }

    finally:
        sdk_marathon.destroy_app(client_id)
Example #14
def _master_zero_http_port(service_name):
    '''Returns the HTTP port of the master-0 node endpoint, which can be queried from within the cluster.
    We cannot cache this value because while the hostnames remain static, the ports are dynamic and may change if the master is replaced.
    '''
    dns = sdk_networks.get_endpoint(PACKAGE_NAME, service_name,
                                    "master-http")["dns"]
    # 'dns' array will look something like this in CCM: [
    #   "master-0-node.[svcname].[...autoip...]:1027",
    #   "master-1-node.[svcname].[...autoip...]:1026",
    #   "master-2-node.[svcname].[...autoip...]:1025"
    # ]

    port = dns[0].split(":")[-1]
    log.info("Extracted {} as port for {}".format(port, dns[0]))
    return port
Example #15
def _master_zero_http_port(service_name: str) -> int:
    """Returns a master node hostname+port endpoint that can be queried from within the cluster. We
    cannot cache this value because while the hostnames remain static, the ports are dynamic and may
    change if the master is replaced.

    """
    dns = sdk_networks.get_endpoint(PACKAGE_NAME, service_name, "master-http")["dns"]
    # 'dns' array will look something like this in CCM: [
    #   "master-0-node.[svcname].[...autoip...]:1027",
    #   "master-1-node.[svcname].[...autoip...]:1026",
    #   "master-2-node.[svcname].[...autoip...]:1025"
    # ]

    port = dns[0].split(":")[-1]
    log.info("Extracted {} as port for {}".format(port, dns[0]))
    return int(port)
Example #16
def kafka_server(kerberos, zookeeper_server):

    # Get the zookeeper DNS values
    zookeeper_dns = sdk_networks.get_endpoint(
        zookeeper_server["package_name"], zookeeper_server["service"]["name"],
        "clientport")["dns"]

    service_kerberos_options = {
        "service": {
            "name": config.SERVICE_NAME,
            "security": {
                "kerberos": {
                    "enabled": True,
                    "enabled_for_zookeeper": True,
                    "kdc": {
                        "hostname": kerberos.get_host(),
                        "port": int(kerberos.get_port())
                    },
                    "realm": kerberos.get_realm(),
                    "keytab_secret": kerberos.get_keytab_path(),
                }
            },
        },
        "kafka": {
            "kafka_zookeeper_uri": ",".join(zookeeper_dns)
        },
    }

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options=service_kerberos_options,
            timeout_seconds=30 * 60,
        )

        yield {
            **service_kerberos_options,
            **{
                "package_name": config.PACKAGE_NAME
            }
        }
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
Example #17
def test_overlay_network_deployment_and_endpoints():
    # Double-check that the expected number of brokers is running
    sdk_tasks.check_running(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
    endpoints = sdk_networks.get_endpoint_names(config.PACKAGE_NAME,
                                                config.SERVICE_NAME)
    assert "broker" in endpoints, "broker is missing from endpoints {}".format(
        endpoints)
    assert "zookeeper" in endpoints, "zookeeper missing from endpoints {}".format(
        endpoints)
    broker_endpoints = sdk_networks.get_endpoint(config.PACKAGE_NAME,
                                                 config.SERVICE_NAME, "broker")
    kafka_networks.check_endpoints_on_overlay(broker_endpoints)

    _, zookeeper, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME,
                                      "endpoints zookeeper")
    assert zookeeper.rstrip() == "master.mesos:2181/{}".format(
        sdk_utils.get_zk_path(config.SERVICE_NAME))
Example #18
def setup_passwords(
    service_name: str = SERVICE_NAME, task_name: str = "master-0-node", https: bool = False
) -> Union[bool, Dict[str, str]]:
    if https:
        master_0_node_dns = sdk_networks.get_endpoint(PACKAGE_NAME, service_name, "master-http")[
            "dns"
        ][0]
        url = "--url https://{}".format(master_0_node_dns)
    else:
        url = ""

    cmd = "\n".join(
        [
            "set -x",
            "export JAVA_HOME=$(ls -d ${MESOS_SANDBOX}/jdk*/jre/)",
            "ELASTICSEARCH_PATH=$(ls -d ${MESOS_SANDBOX}/elasticsearch-*/)",
            "${{ELASTICSEARCH_PATH}}/bin/elasticsearch-setup-passwords auto --batch --verbose {}".format(
                url
            ),
        ]
    )

    full_cmd = "bash -c '{}'".format(cmd)
    _, stdout, _ = sdk_cmd.service_task_exec(service_name, task_name, full_cmd)

    elastic_password_search = re.search("PASSWORD elastic = (.*)", stdout)
    assert isinstance(elastic_password_search, Match)
    elastic_password = elastic_password_search.group(1)

    kibana_password_search = re.search("PASSWORD kibana = (.*)", stdout)
    assert isinstance(kibana_password_search, Match)
    kibana_password = kibana_password_search.group(1)

    if not elastic_password or not kibana_password:
        # Retry.
        return False

    return {"elastic": elastic_password, "kibana": kibana_password}
Example #19
def _configure_kafka_cluster(
        kafka_client: client.KafkaClient, zookeeper_service: typing.Dict,
        allow_access_if_no_acl: bool) -> client.KafkaClient:
    zookeeper_dns = sdk_networks.get_endpoint(
        zookeeper_service["package_name"],
        zookeeper_service["service"]["name"], "clientport")["dns"]

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    service_options = _get_service_options(allow_access_if_no_acl,
                                           kafka_client.kerberos,
                                           zookeeper_dns)

    config.install(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        config.DEFAULT_BROKER_COUNT,
        additional_options=service_options,
    )

    kafka_client.connect(config.DEFAULT_BROKER_COUNT)

    return kafka_client
Example #20
def test_custom_service_tld():
    task_count = 1
    custom_tld = sdk_hosts.get_crypto_id_domain()
    sdk_install.install(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        task_count,
        additional_options={"service": {"custom_service_tld": custom_tld, "yaml": "custom_tld"}},
    )

    # Verify the endpoint entry is correct
    assert set(["test"]) == set(sdk_networks.get_endpoint_names(config.PACKAGE_NAME, config.SERVICE_NAME))
    test_endpoint = sdk_networks.get_endpoint(config.PACKAGE_NAME, config.SERVICE_NAME, "test")
    assert set(["address", "dns"]) == set(test_endpoint.keys())

    assert len(test_endpoint["address"]) == task_count
    # Expect ip:port:
    for entry in test_endpoint["address"]:
        assert len(entry.split(":")) == 2

    assert len(test_endpoint["dns"]) == task_count
    # Expect custom tld:
    for entry in test_endpoint["dns"]:
        assert custom_tld in entry
Example #21
def test_overlay_network():
    """Verify that the current deploy plan matches the expected plan from the spec."""

    deployment_plan = sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)
    log.info(sdk_plan.plan_string("deploy", deployment_plan))

    # test that the tasks are all up, which tests the overlay DNS
    framework_tasks = sdk_tasks.get_service_tasks(config.SERVICE_NAME)

    expected_running_tasks = [
        "overlay-vip-0-server",
        "overlay-0-server",
        "host-vip-0-server",
        "host-0-server"
    ]
    assert set(expected_running_tasks) == set([t.name for t in framework_tasks])

    for task in framework_tasks:
        name = task.name
        if name.startswith("host-"):
            assert "ports" in task.resources.keys(), "Task {} should have port resources".format(
                name
            )
            sdk_networks.check_task_network(name, expected_network_name=None)
        elif name.startswith("overlay-"):
            assert (
                "ports" not in task.resources.keys()
            ), "Task {} should NOT have port resources".format(
                name
            )
            sdk_networks.check_task_network(name)
        else:
            assert False, "Unknown task {}".format(name)

    endpoints_result = sdk_networks.get_endpoint_names(config.PACKAGE_NAME, config.SERVICE_NAME)
    assert len(endpoints_result) == 2, "Expected 2 endpoints, got: {}".format(endpoints_result)

    overlay_endpoints_result = sdk_networks.get_endpoint(
        config.PACKAGE_NAME, config.SERVICE_NAME, "overlay-vip"
    )
    assert "address" in overlay_endpoints_result.keys(), (
        "overlay endpoints missing 'address': {}".format(overlay_endpoints_result)
    )
    assert len(overlay_endpoints_result["address"]) == 1
    assert overlay_endpoints_result["address"][0].startswith("9")
    overlay_port = overlay_endpoints_result["address"][0].split(":")[-1]
    assert overlay_port == "4044"
    assert "dns" in overlay_endpoints_result.keys()
    assert len(overlay_endpoints_result["dns"]) == 1
    assert overlay_endpoints_result["dns"][0] == sdk_hosts.autoip_host(
        config.SERVICE_NAME, "overlay-vip-0-server", 4044
    )

    host_endpoints_result = sdk_networks.get_endpoint(
        config.PACKAGE_NAME, config.SERVICE_NAME, "host-vip"
    )
    assert "address" in host_endpoints_result.keys(), (
        "overlay endpoints missing 'address'" "{}".format(host_endpoints_result)
    )
    assert len(host_endpoints_result["address"]) == 1
    assert host_endpoints_result["address"][0].startswith("10")
    host_port = host_endpoints_result["address"][0].split(":")[-1]
    assert host_port == "4044"
    assert "dns" in host_endpoints_result.keys()
    assert len(host_endpoints_result["dns"]) == 1
    assert host_endpoints_result["dns"][0] == sdk_hosts.autoip_host(
        config.SERVICE_NAME, "host-vip-0-server", 4044
    )
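The IP-prefix assertions above encode a convention of this test environment: tasks on the dcos overlay network receive 9.x.x.x addresses, while host-networked tasks use the agents' 10.x.x.x addresses. The sketch below makes that classification explicit with the standard ipaddress module; the 9.0.0.0/8 and 10.0.0.0/8 subnets and the sample addresses are assumptions drawn only from the startswith checks.

import ipaddress

# Assumed subnets: 9.0.0.0/8 for the DC/OS overlay network, 10.0.0.0/8 for agent hosts.
OVERLAY_NET = ipaddress.ip_network("9.0.0.0/8")
HOST_NET = ipaddress.ip_network("10.0.0.0/8")


def classify(address_with_port: str) -> str:
    # Split off the ":<port>" suffix and classify the remaining IP.
    ip = ipaddress.ip_address(address_with_port.rsplit(":", 1)[0])
    if ip in OVERLAY_NET:
        return "overlay"
    if ip in HOST_NET:
        return "host"
    return "unknown"


assert classify("9.0.3.7:4044") == "overlay"   # overlay-vip style address
assert classify("10.0.2.15:4044") == "host"    # host-vip style address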
Example #22
def service_get_brokers(kafka_server: dict, endpoint_name: str) -> list:
    return sdk_networks.get_endpoint(kafka_server["package_name"],
                                     kafka_server["service"]["name"],
                                     endpoint_name)["dns"]
Example #23
def test_overlay_network():
    """Verify that the current deploy plan matches the expected plan from the spec."""

    deployment_plan = sdk_plan.wait_for_completed_deployment(
        config.SERVICE_NAME)
    log.info(sdk_plan.plan_string("deploy", deployment_plan))

    # test that the tasks are all up, which tests the overlay DNS
    framework_tasks = sdk_tasks.get_service_tasks(config.SERVICE_NAME)

    expected_running_tasks = [
        "overlay-vip-0-server",
        "overlay-0-server",
        "host-vip-0-server",
        "host-0-server",
    ]
    assert set(expected_running_tasks) == set(
        [t.name for t in framework_tasks])

    for task in framework_tasks:
        name = task.name
        if name.startswith("host-"):
            assert "ports" in task.resources.keys(
            ), "Task {} should have port resources".format(name)
            sdk_networks.check_task_network(name, expected_network_name=None)
        elif name.startswith("overlay-"):
            assert ("ports" not in task.resources.keys()
                    ), "Task {} should NOT have port resources".format(name)
            sdk_networks.check_task_network(name)
        else:
            assert False, "Unknown task {}".format(name)

    endpoints_result = sdk_networks.get_endpoint_names(config.PACKAGE_NAME,
                                                       config.SERVICE_NAME)
    assert len(endpoints_result) == 2, "Expected 2 endpoints, got: {}".format(
        endpoints_result)

    overlay_endpoints_result = sdk_networks.get_endpoint(
        config.PACKAGE_NAME, config.SERVICE_NAME, "overlay-vip")
    assert ("address" in overlay_endpoints_result.keys()
            ), "overlay endpoints missing 'address': {}".format(
                overlay_endpoints_result)
    assert len(overlay_endpoints_result["address"]) == 1
    assert overlay_endpoints_result["address"][0].startswith("9")
    overlay_port = overlay_endpoints_result["address"][0].split(":")[-1]
    assert overlay_port == "4044"
    assert "dns" in overlay_endpoints_result.keys()
    assert len(overlay_endpoints_result["dns"]) == 1
    assert overlay_endpoints_result["dns"][0] == sdk_hosts.autoip_host(
        config.SERVICE_NAME, "overlay-vip-0-server", 4044)

    host_endpoints_result = sdk_networks.get_endpoint(config.PACKAGE_NAME,
                                                      config.SERVICE_NAME,
                                                      "host-vip")
    assert (
        "address" in host_endpoints_result.keys()
    ), "host endpoints missing 'address': {}".format(host_endpoints_result)
    assert len(host_endpoints_result["address"]) == 1
    assert host_endpoints_result["address"][0].startswith("10")
    host_port = host_endpoints_result["address"][0].split(":")[-1]
    assert host_port == "4044"
    assert "dns" in host_endpoints_result.keys()
    assert len(host_endpoints_result["dns"]) == 1
    assert host_endpoints_result["dns"][0] == sdk_hosts.autoip_host(
        config.SERVICE_NAME, "host-vip-0-server", 4044)
Example #24
def test_authz_acls_not_required(kafka_client: client.KafkaClient,
                                 zookeeper_server, kerberos):
    try:
        zookeeper_dns = sdk_networks.get_endpoint(
            zookeeper_server["package_name"],
            zookeeper_server["service"]["name"], "clientport")["dns"]

        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        service_options = {
            "service": {
                "name": config.SERVICE_NAME,
                "security": {
                    "kerberos": {
                        "enabled": True,
                        "enabled_for_zookeeper": True,
                        "kdc": {
                            "hostname": kerberos.get_host(),
                            "port": int(kerberos.get_port())
                        },
                        "realm": kerberos.get_realm(),
                        "keytab_secret": kerberos.get_keytab_path(),
                    },
                    "authorization": {
                        "enabled": True,
                        "super_users": "User:{}".format("super"),
                        "allow_everyone_if_no_acl_found": True,
                    },
                },
            },
            "kafka": {
                "kafka_zookeeper_uri": ",".join(zookeeper_dns)
            },
        }

        config.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options=service_options,
        )

        kafka_server = {
            **service_options,
            **{
                "package_name": config.PACKAGE_NAME
            }
        }

        topic_name = "authz.test"
        sdk_cmd.svc_cli(
            kafka_server["package_name"],
            kafka_server["service"]["name"],
            "topic create {}".format(topic_name),
        )

        kafka_client.connect(kafka_server)

        # Clear the ACLs
        kafka_client.remove_acls("authorized", kafka_server, topic_name)

        # Since no ACLs are specified, all users can read and write.
        for user in ["authorized", "unauthorized", "super"]:
            log.info("Checking write / read permissions for user=%s", user)
            write_success, read_successes, _ = kafka_client.can_write_and_read(
                user, kafka_server, topic_name, kerberos)
            assert write_success, "Write failed (user={})".format(user)
            assert read_successes, ("Read failed (user={}): "
                                    "MESSAGES={} "
                                    "read_successes={}".format(
                                        user, kafka_client.MESSAGES,
                                        read_successes))

        log.info("Writing and reading: Adding acl for authorized user")
        kafka_client.add_acls("authorized", kafka_server, topic_name)

        # After adding ACLs the authorized user and super user should still have access to the topic.
        for user in ["authorized", "super"]:
            log.info("Checking write / read permissions for user=%s", user)
            write_success, read_successes, _ = kafka_client.can_write_and_read(
                user, kafka_server, topic_name, kerberos)
            assert write_success, "Write failed (user={})".format(user)
            assert read_successes, ("Read failed (user={}): "
                                    "MESSAGES={} "
                                    "read_successes={}".format(
                                        user, kafka_client.MESSAGES,
                                        read_successes))

        for user in ["unauthorized"]:
            log.info("Checking lack of write / read permissions for user=%s",
                     user)
            write_success, _, read_messages = kafka_client.can_write_and_read(
                user, kafka_server, topic_name, kerberos)
            assert not write_success, "Write not expected to succeed (user={})".format(
                user)
            assert auth.is_not_authorized(
                read_messages), "Unauthorized expected (user={})".format(user)

    finally:
        # Ensure that we clean up the ZK state.
        kafka_client.remove_acls("authorized", kafka_server, topic_name)

        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
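To summarize the authorization behaviour this test asserts: with allow_everyone_if_no_acl_found enabled and no ACLs on the topic, every user can write and read; once an ACL for the authorized user exists, only that user and the super user keep access. A small expectation table, expressed as plain data purely for reference (the dict layout is an illustration, not part of the test harness).

# True = the write/read round trip is expected to succeed.
EXPECTED_ACCESS = {
    #  user           no ACLs on topic     ACL grants "authorized"
    "authorized":   {"no_acl": True,  "with_acl": True},
    "super":        {"no_acl": True,  "with_acl": True},
    "unauthorized": {"no_acl": True,  "with_acl": False},
}

for user, expectation in EXPECTED_ACCESS.items():
    print("{:>12}: {}".format(user, expectation))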
Example #25
    def get_endpoint_dns(self, endpoint_name: str) -> list:
        return sdk_networks.get_endpoint(self._package_name,
                                         self._service_name,
                                         endpoint_name)["dns"]
Example #26
def test_no_vip(kafka_server):
    broker_endpoint = sdk_networks.get_endpoint(config.PACKAGE_NAME,
                                                config.SERVICE_NAME, "broker")
    assert "vip" not in broker_endpoint
Example #27
def test_no_vip(kafka_server):
    broker_endpoint = sdk_networks.get_endpoint(
        kafka_server["package_name"], kafka_server["service"]["name"], "broker"
    )
    assert "vip" not in broker_endpoint