Example #1
def test_tls_ciphers(kafka_service_tls):
    task_name = 'kafka-0-broker'
    endpoint = sdk_cmd.svc_cli(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        'endpoints {}'.format(BROKER_TLS_ENDPOINT),
        json=True)['dns'][0]
    ciphers_config_path = ['service', 'security', 'transport_encryption', 'ciphers']
    expected_ciphers = set(sdk_utils.get_in(ciphers_config_path, sdk_cmd.svc_cli(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        'describe',
        json=True), '').rstrip().split(','))
    possible_ciphers = set(map(cipher_suites.rfc_name, sdk_security.openssl_ciphers()))
    enabled_ciphers = set()

    assert expected_ciphers, 'Expected ciphers should be non-empty'
    assert possible_ciphers, 'Possible ciphers should be non-empty'

    sdk_cmd.service_task_exec(config.SERVICE_NAME, task_name, 'openssl version')  # Output OpenSSL version.
    print("\nExpected ciphers:")
    print("\n".join(sdk_utils.sort(list(expected_ciphers))))
    print("\n{} ciphers will be checked:".format(len(possible_ciphers)))
    print("\n".join(sdk_utils.sort(list(possible_ciphers))))

    for cipher in possible_ciphers:
        openssl_cipher = cipher_suites.openssl_name(cipher)
        if sdk_security.is_cipher_enabled(config.SERVICE_NAME, task_name, openssl_cipher, endpoint):
            enabled_ciphers.add(cipher)

    print('{} ciphers enabled out of {}:'.format(len(enabled_ciphers), len(possible_ciphers)))
    print("\n".join(sdk_utils.sort(list(enabled_ciphers))))

    assert expected_ciphers == enabled_ciphers, "Enabled ciphers should match expected ciphers"
Example #2
def test_changing_discovery_replaces_certificate_sans():
    """
    Update service configuration to change discovery prefix of a task.
    Scheduler should update task and new SANs should be generated.
    """

    # Load end-entity certificate from PEM encoded file
    _, stdout, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME,
                                             'discovery-0-node',
                                             'cat server.crt')
    log.info('first server.crt: {}'.format(stdout))

    ascii_cert = stdout.encode('ascii')
    log.info('first server.crt ascii encoded: {}'.format(ascii_cert))

    end_entity_cert = x509.load_pem_x509_certificate(ascii_cert,
                                                     DEFAULT_BACKEND)

    san_extension = end_entity_cert.extensions.get_extension_for_oid(
        ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
    sans = [
        san.value for san in san_extension.value._general_names._general_names
    ]

    expected_san = (
        '{name}-0.{service_name}.autoip.dcos.thisdcos.directory'.format(
            name=DISCOVERY_TASK_PREFIX, service_name=config.SERVICE_NAME))
    assert expected_san in sans

    # Run task update with new discovery prefix
    marathon_config = sdk_marathon.get_config(config.SERVICE_NAME)
    marathon_config['env'][
        'DISCOVERY_TASK_PREFIX'] = DISCOVERY_TASK_PREFIX + '-new'
    sdk_marathon.update_app(config.SERVICE_NAME, marathon_config)
    sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)

    _, stdout, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME,
                                             'discovery-0-node',
                                             'cat server.crt')
    log.info('second server.crt: {}'.format(stdout))

    ascii_cert = stdout.encode('ascii')
    log.info('second server.crt ascii encoded: {}'.format(ascii_cert))
    new_cert = x509.load_pem_x509_certificate(ascii_cert, DEFAULT_BACKEND)

    san_extension = new_cert.extensions.get_extension_for_oid(
        ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
    sans = [
        san.value for san in san_extension.value._general_names._general_names
    ]

    expected_san = (
        '{name}-0.{service_name}.autoip.dcos.thisdcos.directory'.format(
            name=DISCOVERY_TASK_PREFIX + '-new',
            service_name=config.SERVICE_NAME))
    assert expected_san in sans
Example #3
def test_tls_ciphers(kafka_service):
    task_name = "kafka-0-broker"
    endpoint = sdk_networks.get_endpoint(config.PACKAGE_NAME,
                                         config.SERVICE_NAME,
                                         BROKER_TLS_ENDPOINT)["dns"][0]
    ciphers_config_path = [
        "service", "security", "transport_encryption", "ciphers"
    ]
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME,
                                    "describe")
    assert rc == 0, "Describe command failed"
    expected_ciphers = set(
        sdk_utils.get_in(
            ciphers_config_path,
            json.loads(stdout),
            "",
        ).rstrip().split(","))

    openssl_ciphers = sdk_security.openssl_ciphers()
    missing_openssl_ciphers = cipher_suites.missing_openssl_ciphers(
        openssl_ciphers)
    possible_openssl_ciphers = openssl_ciphers - missing_openssl_ciphers
    enabled_ciphers = set()

    assert openssl_ciphers, "OpenSSL ciphers should be non-empty"
    assert expected_ciphers, "Expected ciphers should be non-empty"
    assert possible_openssl_ciphers, "Possible OpenSSL ciphers should be non-empty"

    # Output OpenSSL version.
    sdk_cmd.service_task_exec(config.SERVICE_NAME, task_name,
                              "openssl version")
    log.warning("\n%s OpenSSL ciphers missing from the cipher_suites module:",
                len(missing_openssl_ciphers))
    log.warning("\n".join(to_sorted(list(missing_openssl_ciphers))))
    log.info("\n%s expected ciphers:", len(expected_ciphers))
    log.info("\n".join(to_sorted(list(expected_ciphers))))
    log.info("\n%s ciphers will be checked:", len(possible_openssl_ciphers))
    for openssl_cipher in to_sorted(list(possible_openssl_ciphers)):
        log.info("%s (%s)", cipher_suites.rfc_name(openssl_cipher),
                 openssl_cipher)

    for openssl_cipher in possible_openssl_ciphers:
        if sdk_security.is_cipher_enabled(config.SERVICE_NAME, task_name,
                                          openssl_cipher, endpoint):
            enabled_ciphers.add(cipher_suites.rfc_name(openssl_cipher))

    log.info("%s ciphers enabled out of %s:", len(enabled_ciphers),
             len(possible_openssl_ciphers))
    log.info("\n".join(to_sorted(list(enabled_ciphers))))

    assert expected_ciphers == enabled_ciphers, "Enabled ciphers should match expected ciphers"
Example #4
def test_changing_discovery_replaces_certificate_sans():
    """
    Update service configuration to change discovery prefix of a task.
    Scheduler should update task and new SANs should be generated.
    """

    # Load end-entity certificate from PEM encoded file
    _, stdout, _ = sdk_cmd.service_task_exec(
        config.SERVICE_NAME, "discovery-0-node", "cat server.crt"
    )
    log.info("first server.crt: {}".format(stdout))

    ascii_cert = stdout.encode("ascii")
    log.info("first server.crt ascii encoded: {}".format(ascii_cert))

    end_entity_cert = x509.load_pem_x509_certificate(ascii_cert, DEFAULT_BACKEND)

    san_extension = end_entity_cert.extensions.get_extension_for_oid(
        ExtensionOID.SUBJECT_ALTERNATIVE_NAME
    )
    sans = [san.value for san in san_extension.value._general_names._general_names]

    expected_san = "{name}-0.{service_name}.autoip.dcos.thisdcos.directory".format(
        name=DISCOVERY_TASK_PREFIX, service_name=config.SERVICE_NAME
    )
    assert expected_san in sans

    # Run task update with new discovery prefix
    marathon_config = sdk_marathon.get_config(config.SERVICE_NAME)
    marathon_config["env"]["DISCOVERY_TASK_PREFIX"] = DISCOVERY_TASK_PREFIX + "-new"
    sdk_marathon.update_app(marathon_config)
    sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)

    _, stdout, _ = sdk_cmd.service_task_exec(
        config.SERVICE_NAME, "discovery-0-node", "cat server.crt"
    )
    log.info("second server.crt: {}".format(stdout))

    ascii_cert = stdout.encode("ascii")
    log.info("second server.crt ascii encoded: {}".format(ascii_cert))
    new_cert = x509.load_pem_x509_certificate(ascii_cert, DEFAULT_BACKEND)

    san_extension = new_cert.extensions.get_extension_for_oid(ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
    sans = [san.value for san in san_extension.value._general_names._general_names]

    expected_san = "{name}-0.{service_name}.autoip.dcos.thisdcos.directory".format(
        name=DISCOVERY_TASK_PREFIX + "-new", service_name=config.SERVICE_NAME
    )
    assert expected_san in sans
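The two examples above read SANs through the private _general_names attribute. A minimal sketch of the same extraction using the cryptography package's public API (the function name and the PEM input are assumptions for illustration):

from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.x509.oid import ExtensionOID


def dns_sans_from_pem(pem_text: str) -> list:
    # Parse a PEM-encoded certificate and return its DNS SANs through the
    # public get_values_for_type() accessor instead of private attributes.
    cert = x509.load_pem_x509_certificate(pem_text.encode("ascii"), default_backend())
    san_extension = cert.extensions.get_extension_for_oid(
        ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
    return san_extension.value.get_values_for_type(x509.DNSName)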
Example #5
def test_tls_ciphers(kafka_service_tls):
    task_name = 'kafka-0-broker'
    endpoint = sdk_cmd.svc_cli(config.PACKAGE_NAME,
                               config.SERVICE_NAME,
                               'endpoints {}'.format(BROKER_TLS_ENDPOINT),
                               json=True)['dns'][0]
    ciphers_config_path = [
        'service', 'security', 'transport_encryption', 'ciphers'
    ]
    expected_ciphers = set(
        sdk_utils.get_in(
            ciphers_config_path,
            sdk_cmd.svc_cli(config.PACKAGE_NAME,
                            config.SERVICE_NAME,
                            'describe',
                            json=True), '').rstrip().split(','))

    openssl_ciphers = sdk_security.openssl_ciphers()
    missing_openssl_ciphers = cipher_suites.missing_openssl_ciphers(
        openssl_ciphers)
    possible_openssl_ciphers = openssl_ciphers - missing_openssl_ciphers
    enabled_ciphers = set()

    assert openssl_ciphers, 'OpenSSL ciphers should be non-empty'
    assert expected_ciphers, 'Expected ciphers should be non-empty'
    assert possible_openssl_ciphers, 'Possible OpenSSL ciphers should be non-empty'

    sdk_cmd.service_task_exec(config.SERVICE_NAME, task_name,
                              'openssl version')  # Output OpenSSL version.
    log.warning("\n%s OpenSSL ciphers missing from the cipher_suites module:",
                len(missing_openssl_ciphers))
    log.warning("\n".join(sdk_utils.sort(list(missing_openssl_ciphers))))
    log.info("\n%s expected ciphers:", len(expected_ciphers))
    log.info("\n".join(sdk_utils.sort(list(expected_ciphers))))
    log.info("\n%s ciphers will be checked:", len(possible_openssl_ciphers))
    for openssl_cipher in sdk_utils.sort(list(possible_openssl_ciphers)):
        log.info("%s (%s)", cipher_suites.rfc_name(openssl_cipher),
                 openssl_cipher)

    for openssl_cipher in possible_openssl_ciphers:
        if sdk_security.is_cipher_enabled(config.SERVICE_NAME, task_name,
                                          openssl_cipher, endpoint):
            enabled_ciphers.add(cipher_suites.rfc_name(openssl_cipher))

    log.info('%s ciphers enabled out of %s:', len(enabled_ciphers),
             len(possible_openssl_ciphers))
    log.info("\n".join(sdk_utils.sort(list(enabled_ciphers))))

    assert expected_ciphers == enabled_ciphers, "Enabled ciphers should match expected ciphers"
Example #6
def verify_shared_executor(
    pod_name, expected_files=["essential", "nonessential"], delete_files=True
):
    """verify that both tasks share the same executor:
    - matching ExecutorInfo
    - both 'essential' and 'nonessential' present in shared-volume/ across both tasks
    """
    rc, stdout, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, "pod info {}".format(pod_name), print_output=False
    )
    assert rc == 0, "Pod info failed"
    try:
        tasks = json.loads(stdout)
    except Exception:
        log.exception("Failed to parse pod info: {}".format(stdout))
        assert False, "Failed to parse pod info, see above"
    assert len(tasks) == 2, "Expected 2 tasks: {}".format(stdout)

    # check that the task executors all match
    executor = tasks[0]["info"]["executor"]
    for task in tasks[1:]:
        assert executor == task["info"]["executor"]

    # for each task, check shared volume content matches what's expected
    task_names = [task["info"]["name"] for task in tasks]
    for task_name in task_names:
        # 1.9 just uses the host filesystem in 'task exec', so use 'task ls' across the board instead
        filenames = sdk_cmd.run_cli("task ls {} shared-volume/".format(task_name))[1].strip().split()
        assert set(expected_files) == set(filenames)

    # delete files from volume in preparation for a following task relaunch
    if delete_files:
        if sdk_utils.dcos_version_less_than("1.10"):
            # 1.9 just uses the host filesystem in 'task exec', so figure out the absolute volume path manually
            expected_file_path = sdk_cmd.service_task_exec(
                config.SERVICE_NAME,
                task_names[0],
                "find /var/lib/mesos/slave/volumes -iname " + filenames[0],
            )[1].strip()
            # volume dir is parent of the expected file path.
            volume_dir = os.path.dirname(expected_file_path)
        else:
            # 1.10+ works correctly: path is relative to sandbox
            volume_dir = "shared-volume/"
        sdk_cmd.service_task_exec(
            config.SERVICE_NAME,
            task_names[0],
            "rm " + " ".join([os.path.join(volume_dir, name) for name in filenames]),
        )
Example #7
    def write_metric_to_statsd_counter(metric_name: str, value: int):
        """
        Write a metric with the specified value to statsd.

        This is done by echoing the statsd string through ncat to the statsd host and port.
        """
        metric_echo = 'echo \\"{}:{}|c\\"'.format(metric_name, value)
        ncat_command = 'ncat -w 1 -u \\$STATSD_UDP_HOST \\$STATSD_UDP_PORT'
        pipe = " | "

        bash_command = sdk_cmd.get_bash_command(
            metric_echo + pipe + ncat_command,
            environment=None,
        )
        sdk_cmd.service_task_exec(configure_package["service"]["name"], "hello-0-server", bash_command)
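For reference, the statsd counter payload built above has the form "<metric_name>:<value>|c". A self-contained sketch that sends the same datagram directly over UDP with the standard library (the host and port arguments are assumptions; the test above must instead go through ncat inside the task, where $STATSD_UDP_HOST and $STATSD_UDP_PORT are defined):

import socket


def send_statsd_counter(host: str, port: int, metric_name: str, value: int) -> None:
    # Build the statsd counter payload "<name>:<value>|c" and send it as a
    # single UDP datagram; statsd is fire-and-forget, so there is no reply.
    payload = "{}:{}|c".format(metric_name, value).encode("utf-8")
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.sendto(payload, (host, port))
    finally:
        sock.close()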
Example #8
    def write_metric_to_statsd_counter(metric_name: str, value: int):
        """
        Write a metric with the specified value to statsd.

        This is done by echoing the statsd string through ncat to the statsd host and port.
        """
        metric_echo = 'echo \\"{}:{}|c\\"'.format(metric_name, value)
        ncat_command = "ncat -w 1 -u \\$STATSD_UDP_HOST \\$STATSD_UDP_PORT"
        pipe = " | "

        bash_command = sdk_cmd.get_bash_command(metric_echo + pipe +
                                                ncat_command,
                                                environment=None)
        sdk_cmd.service_task_exec(configure_package["service"]["name"],
                                  "hello-0-server", bash_command)
Example #9
def setup_passwords(
    service_name: str = SERVICE_NAME,
    task_name: str = "master-0-node",
) -> Union[bool, Dict[str, str]]:
    cmd = "\n".join([
        "set -x",
        "export JAVA_HOME=$(ls -d ${MESOS_SANDBOX}/jdk*/jre/)",
        "ELASTICSEARCH_PATH=$(ls -d ${MESOS_SANDBOX}/elasticsearch-*/)",
        "${ELASTICSEARCH_PATH}/bin/elasticsearch-setup-passwords auto --batch --verbose",
    ])
    full_cmd = "bash -c '{}'".format(cmd)
    _, stdout, _ = sdk_cmd.service_task_exec(service_name, task_name, full_cmd)

    elastic_password_search = re.search("PASSWORD elastic = (.*)", stdout)
    assert isinstance(elastic_password_search, Match)
    elastic_password = elastic_password_search.group(1)

    kibana_password_search = re.search("PASSWORD kibana = (.*)", stdout)
    assert isinstance(kibana_password_search, Match)
    kibana_password = kibana_password_search.group(1)

    if not elastic_password or not kibana_password:
        # Retry.
        return False

    return {"elastic": elastic_password, "kibana": kibana_password}
Example #10
def test_envvar_accross_restarts():
    sleep_duration = 9999
    sdk_upgrade.update_or_upgrade_or_downgrade(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        to_package_version=None,
        additional_options={
            "service": {"name": config.SERVICE_NAME, "sleep": sleep_duration, "yaml": "sidecar"}
        },
        expected_running_tasks=2,
        wait_for_deployment=True,
    )

    for attempt in range(3):
        cmd_list = ["pod", "restart", "hello-0"]
        sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, " ".join(cmd_list))

        sdk_plan.wait_for_kicked_off_recovery(config.SERVICE_NAME)
        sdk_plan.wait_for_completed_recovery(config.SERVICE_NAME)

        _, stdout, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, "hello-0-server", "env")

        envvar = "CONFIG_SLEEP_DURATION="
        envvar_pos = stdout.find(envvar)
        if envvar_pos < 0:
            raise Exception("Required envvar not found")

        if not stdout[envvar_pos + len(envvar) :].startswith("{}".format(sleep_duration)):
            found_string = stdout[envvar_pos + len(envvar) : envvar_pos + len(envvar) + 15]
            log.error(
                "(%d) Looking for %s%d but found: %s", attempt, envvar, sleep_duration, found_string
            )
            raise Exception("Envvar not set to required value")
Example #11
def setup_passwords(service_name: str = SERVICE_NAME,
                    task_name: str = "master-0-node",
                    https: bool = False) -> Union[bool, Dict[str, str]]:
    if https:
        master_0_node_dns = sdk_networks.get_endpoint(PACKAGE_NAME,
                                                      service_name,
                                                      "master-http")["dns"][0]
        url = "--url https://{}".format(master_0_node_dns)
    else:
        url = ""

    cmd = "\n".join([
        "set -x",
        "export JAVA_HOME=$(ls -d ${MESOS_SANDBOX}/jdk*/)",
        "ELASTICSEARCH_PATH=$(ls -d ${MESOS_SANDBOX}/elasticsearch-*/)",
        "${{ELASTICSEARCH_PATH}}/bin/elasticsearch-setup-passwords auto --batch --verbose {}"
        .format(url),
    ])

    full_cmd = "bash -c '{}'".format(cmd)
    _, stdout, _ = sdk_cmd.service_task_exec(service_name, task_name, full_cmd)

    elastic_password_search = re.search("PASSWORD elastic = (.*)", stdout)
    assert isinstance(elastic_password_search, Match)
    elastic_password = elastic_password_search.group(1)

    kibana_password_search = re.search("PASSWORD kibana = (.*)", stdout)
    assert isinstance(kibana_password_search, Match)
    kibana_password = kibana_password_search.group(1)

    if not elastic_password or not kibana_password:
        # Retry.
        return False

    return {"elastic": elastic_password, "kibana": kibana_password}
Example #12
def test_java_keystore():
    """
    Java `keystore-app` presents itself with provided TLS certificate
    from keystore.
    """

    # Make a curl request from artifacts container to `keystore-app`
    # and make sure that mesos curl can verify certificate served by app
    cmd_list = [
        "curl",
        "-v",
        "-i",
        "--cacert",
        "secure-tls-pod.ca",
        "https://{}/hello-world".format(
            sdk_hosts.vip_host(config.SERVICE_NAME, KEYSTORE_TASK_HTTPS_PORT_NAME)
        ),
    ]
    curl = " ".join(cmd_list)

    _, _, stderr = sdk_cmd.service_task_exec(config.SERVICE_NAME, "artifacts-0-node", curl)
    # Check that HTTP request was successful with response 200 and make sure
    # that curl with pre-configured cert was used and that task was matched
    # by SAN in certificate.
    assert "HTTP/1.1 200 OK" in stderr
    assert "CAfile: secure-tls-pod.ca" in stderr
    tls_verification_msg = (
        'host "keystore-https.hello-world.l4lb.thisdcos.directory" matched '
        'cert\'s "keystore-https.hello-world.l4lb.thisdcos.directory"'
    )
    assert tls_verification_msg in stderr
Example #13
def cmd(service_name, task_name, command):
    return sdk_cmd.service_task_exec(
        service_name,
        task_name,
        "bash -c 'JAVA_HOME=$(ls -d jdk*/jre/) apache-cassandra-*/bin/nodetool {}'"
        .format(command),
    )
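A hypothetical usage of the helper above, running a standard nodetool subcommand against one Cassandra node (the task name and subcommand are illustrative, not taken from the surrounding tests):

rc, stdout, stderr = cmd(config.SERVICE_NAME, "node-0-server", "status")
assert rc == 0, "nodetool status failed:\n{}\n{}".format(stdout, stderr)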
Example #14
def test_custom_log4j2_properties_base64() -> None:
    try:
        decoded_base_64_log4j2_properties = "rootLogger.level = debug"
        base_64_log4j2_properties = base64.b64encode(
            decoded_base_64_log4j2_properties.encode("utf-8")).decode("utf-8")

        sdk_service.update_configuration(
            package_name,
            service_name,
            {
                "elasticsearch": {
                    "custom_log4j2_properties": base_64_log4j2_properties
                }
            },
            current_expected_task_count,
        )

        cmd = "bash -c 'grep \"{}\" elasticsearch-*/config/log4j2.properties'".format(
            decoded_base_64_log4j2_properties)
        rc, stdout, stderr = sdk_cmd.service_task_exec(service_name,
                                                       "master-0-node", cmd)
        assert rc == 0 and decoded_base_64_log4j2_properties in stdout
    finally:
        sdk_service.update_configuration(
            package_name,
            service_name,
            {"elasticsearch": {
                "custom_log4j2_properties": ""
            }},
            current_expected_task_count,
        )
Example #15
def test_tls_basic_artifacts():

    # Load end-entity certificate from keystore and root CA cert from truststore
    stdout = sdk_cmd.service_task_exec(
        config.SERVICE_NAME, 'artifacts-0-node',
        'cat secure-tls-pod.crt')[1].encode('ascii')
    end_entity_cert = x509.load_pem_x509_certificate(stdout, DEFAULT_BACKEND)

    root_ca_cert_in_truststore = _export_cert_from_task_keystore(
        'artifacts-0-node', 'keystore.truststore', 'dcos-root')

    # Check that certificate subject matches the service name
    common_name = end_entity_cert.subject.get_attributes_for_oid(
        NameOID.COMMON_NAME)[0].value
    assert common_name in sdk_hosts.autoip_host(config.SERVICE_NAME,
                                                'artifacts-0-node')

    san_extension = end_entity_cert.extensions.get_extension_for_oid(
        ExtensionOID.SUBJECT_ALTERNATIVE_NAME)
    sans = san_extension.value._general_names._general_names
    assert len(sans) == 1

    cluster_root_ca_cert = x509.load_pem_x509_certificate(
        sdk_cmd.cluster_request('GET', '/ca/dcos-ca.crt').content,
        DEFAULT_BACKEND)

    assert root_ca_cert_in_truststore.signature == cluster_root_ca_cert.signature
Example #16
def _export_cert_from_task_keystore(task_name, keystore_path, alias, password=KEYSTORE_PASS):
    """
    Retrieves the certificate with the given alias from the keystore by
    executing keytool in the context of the running container and loads the
    certificate into memory.

    Args:
        task_name (str): Name of the task whose container contains the keystore
        keystore_path (str): Path inside container to keystore containing
            the certificate
        alias (str): Alias of the certificate in the keystore

    Returns:
        x509.Certificate object
    """
    args = ["-rfc"]
    if password:
        args.append('-storepass "{password}"'.format(password=password))

    args_str = " ".join(args)

    cert_bytes = sdk_cmd.service_task_exec(
        config.SERVICE_NAME, task_name, _keystore_export_command(keystore_path, alias, args_str)
    )[1].encode("ascii")

    return x509.load_pem_x509_certificate(cert_bytes, DEFAULT_BACKEND)
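The _keystore_export_command helper used above is not shown in these examples. A plausible sketch of such a command builder, assuming keytool is available on the PATH inside the task container (an assumption, not the SDK's actual implementation):

def _keystore_export_command(keystore_path, alias, args_str):
    # keytool -exportcert prints the certificate stored under the given alias;
    # the -rfc flag passed via args_str makes the output PEM-encoded.
    return "keytool -exportcert -keystore {} -alias {} {}".format(
        keystore_path, alias, args_str)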
Example #17
def test_java_keystore():
    """
    Java `keystore-app` presents itself with provided TLS certificate
    from keystore.
    """

    # Make a curl request from artifacts container to `keystore-app`
    # and make sure that mesos curl can verify certificate served by app
    curl = (
        'curl -v -i '
        '--cacert secure-tls-pod.ca '
        'https://' + sdk_hosts.vip_host(
            config.SERVICE_NAME, KEYSTORE_TASK_HTTPS_PORT_NAME) + '/hello-world'
        )

    _, output = sdk_cmd.service_task_exec(config.SERVICE_NAME, 'artifacts-0-node', curl, return_stderr_in_stdout=True)
    # Check that HTTP request was successful with response 200 and make sure
    # that curl with pre-configured cert was used and that task was matched
    # by SAN in certificate.
    assert 'HTTP/1.1 200 OK' in output
    assert 'CAfile: secure-tls-pod.ca' in output
    tls_verification_msg = (
        'host "keystore-https.hello-world.l4lb.thisdcos.directory" matched '
        'cert\'s "keystore-https.hello-world.l4lb.thisdcos.directory"'
    )
    assert tls_verification_msg in output
Example #18
def _export_cert_from_task_keystore(
        task_name, keystore_path, alias, password=KEYSTORE_PASS):
    """
    Retrieves the certificate with the given alias from the keystore by
    executing keytool in the context of the running container and loads the
    certificate into memory.

    Args:
        task_name (str): Name of the task whose container contains the keystore
        keystore_path (str): Path inside container to keystore containing
            the certificate
        alias (str): Alias of the certificate in the keystore

    Returns:
        x509.Certificate object
    """
    args = ['-rfc']
    if password:
        args.append('-storepass "{password}"'.format(password=password))

    args_str = ' '.join(args)

    cert_bytes = sdk_cmd.service_task_exec(
        config.SERVICE_NAME,
        task_name,
        _keystore_export_command(keystore_path, alias, args_str))[1].encode('ascii')

    return x509.load_pem_x509_certificate(
        cert_bytes, DEFAULT_BACKEND)
Example #19
def test_tls_basic_artifacts():

    # Load end-entity certificate from keystore and root CA cert from truststore
    stdout = sdk_cmd.service_task_exec(
        config.SERVICE_NAME, "artifacts-0-node", "cat secure-tls-pod.crt"
    )[1].encode("ascii")
    end_entity_cert = x509.load_pem_x509_certificate(stdout, DEFAULT_BACKEND)

    root_ca_cert_in_truststore = _export_cert_from_task_keystore(
        "artifacts-0-node", "keystore.truststore", "dcos-root"
    )

    # Check that certificate subject matches the service name
    common_name = end_entity_cert.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value
    assert common_name in sdk_hosts.autoip_host(config.SERVICE_NAME, "artifacts-0-node")

    san_extension = end_entity_cert.extensions.get_extension_for_oid(
        ExtensionOID.SUBJECT_ALTERNATIVE_NAME
    )
    sans = san_extension.value._general_names._general_names
    assert len(sans) == 1

    cluster_root_ca_cert = x509.load_pem_x509_certificate(
        transport_encryption.fetch_dcos_ca_bundle_contents(), DEFAULT_BACKEND
    )

    assert root_ca_cert_in_truststore.signature == cluster_root_ca_cert.signature
Example #20
def test_java_keystore():
    """
    Java `keystore-app` presents itself with provided TLS certificate
    from keystore.
    """

    # Make a curl request from artifacts container to `keystore-app`
    # and make sure that mesos curl can verify certificate served by app
    cmd_list = [
        "curl",
        "-v",
        "-i",
        "--cacert",
        "secure-tls-pod.ca",
        "https://{}/hello-world".format(
            sdk_hosts.vip_host(config.SERVICE_NAME,
                               KEYSTORE_TASK_HTTPS_PORT_NAME)),
    ]
    curl = " ".join(cmd_list)

    _, _, stderr = sdk_cmd.service_task_exec(config.SERVICE_NAME,
                                             "artifacts-0-node", curl)
    # Check that HTTP request was successful with response 200 and make sure
    # that curl with pre-configured cert was used and that task was matched
    # by SAN in certificate.
    assert "HTTP/1.1 200 OK" in stderr
    assert "CAfile: secure-tls-pod.ca" in stderr
    tls_verification_msg = (
        'host "keystore-https.hello-world.l4lb.thisdcos.directory" matched '
        'cert\'s "keystore-https.hello-world.l4lb.thisdcos.directory"')
    assert tls_verification_msg in stderr
Example #21
def _curl_query(
    service_name,
    method,
    endpoint,
    json_body=None,
    task="master-0-node",
    https=False,
    return_json=True,
    http_user=DEFAULT_ELASTICSEARCH_USER,
    http_password=DEFAULT_ELASTICSEARCH_PASSWORD,
):
    protocol = "https" if https else "http"

    if http_password and not http_user:
        raise Exception(
            "HTTP authentication won't work with just a password. Needs at least user, or both user AND password"
        )

    credentials = ""
    if http_user:
        credentials = "-u {}".format(http_user)
    if http_password:
        credentials = "{}:{}".format(credentials, http_password)

    host = sdk_hosts.autoip_host(service_name, task,
                                 _master_zero_http_port(service_name))

    curl_cmd = "/opt/mesosphere/bin/curl -sS {} -X{} '{}://{}/{}'".format(
        credentials, method, protocol, host, endpoint)

    if json_body:
        curl_cmd += " -H 'Content-type: application/json' -d '{}'".format(
            json.dumps(json_body))

    task_name = "master-0-node"
    exit_code, stdout, stderr = sdk_cmd.service_task_exec(
        service_name, task_name, curl_cmd)

    def build_errmsg(msg):
        return "{}\nCommand:\n{}\nstdout:\n{}\nstderr:\n{}".format(
            msg, curl_cmd, stdout, stderr)

    if exit_code:
        log.warning(
            build_errmsg(
                "Failed to run command on {}, retrying or giving up.".format(
                    task_name)))
        return None

    if not return_json:
        return stdout

    try:
        return json.loads(stdout)
    except Exception:
        log.warning(
            build_errmsg(
                "Failed to parse stdout as JSON, retrying or giving up."))
        return None
Example #22
    def run_openssl_command() -> str:
        command = ' '.join([
            'timeout', openssl_timeout,
            'openssl', 's_client', '-cipher', cipher, '-connect', endpoint
        ])

        _, output = sdk_cmd.service_task_exec(service_name, task_name, command, True)
        return output
Example #23
def read_secret(task_name, command):
    _, output, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, task_name, command)
    lines = [line.strip() for line in output.split('\n')]
    log.info('Looking for %s...', secret_content_default)
    for line in lines:
        if line.startswith(secret_content_default):
            return line
    raise Exception("Failed to read secret from {} with command '{}'".format(task_name, command))
Example #24
    def run_openssl_command() -> str:
        command = ' '.join([
            'timeout', openssl_timeout, 'openssl', 's_client', '-cipher',
            cipher, '-connect', endpoint
        ])

        _, stdout, stderr = sdk_cmd.service_task_exec(service_name, task_name,
                                                      command)
        return stdout + '\n' + stderr
Example #25
def _curl_query(
    service_name: str,
    method: str,
    endpoint: str,
    json_body: Optional[Dict[str, Any]] = None,
    task: str = "master-0-node",
    https: bool = False,
    return_json: bool = True,
    http_user: Optional[str] = DEFAULT_ELASTICSEARCH_USER,
    http_password: Optional[str] = DEFAULT_ELASTICSEARCH_PASSWORD,
) -> Optional[Union[str, Dict[str, Any]]]:
    protocol = "https" if https else "http"

    if http_password:
        if not http_user:
            http_user = DEFAULT_ELASTICSEARCH_USER
            log.info("Using default basic HTTP user: '******'", http_user)

        credentials = "-u {}:{}".format(http_user, http_password)
    else:
        if http_user:
            raise Exception(
                "HTTP authentication won't work with just a user. Needs both user AND password"
            )
        credentials = ""

    host = sdk_hosts.autoip_host(service_name, task, _master_zero_http_port(service_name))

    curl_cmd = "/opt/mesosphere/bin/curl -sS {} -X{} '{}://{}/{}'".format(
        credentials, method, protocol, host, endpoint
    )

    if json_body:
        curl_cmd += " -H 'Content-type: application/json' -d '{}'".format(json.dumps(json_body))

    task_name = "master-0-node"
    exit_code, stdout, stderr = sdk_cmd.service_task_exec(service_name, task_name, curl_cmd)

    def build_errmsg(msg: str) -> str:
        return "{}\nCommand:\n{}\nstdout:\n{}\nstderr:\n{}".format(msg, curl_cmd, stdout, stderr)

    if exit_code:
        log.warning(
            build_errmsg("Failed to run command on {}, retrying or giving up.".format(task_name))
        )
        return None

    if not return_json:
        return stdout

    try:
        result = json.loads(stdout)
        assert isinstance(result, dict)
        return result
    except Exception:
        log.warning(build_errmsg("Failed to parse stdout as JSON, retrying or giving up."))
        return None
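A hypothetical call of the _curl_query helper defined above, fetching Elasticsearch cluster health as parsed JSON (the service name is illustrative; a None result means the request failed and should be retried):

health = _curl_query(config.SERVICE_NAME, "GET", "_cluster/health")
if health is not None:
    assert health.get("status") in ("green", "yellow")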
Example #26
def read_secret(task_name, command):
    _, output, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, task_name,
                                             command)
    lines = [line.strip() for line in output.split("\n")]
    log.info("Looking for %s...", secret_content_default)
    for line in lines:
        if line.startswith(secret_content_default):
            return line
    raise Exception("Failed to read secret from {} with command '{}'".format(
        task_name, command))
Example #27
def search_for_host_volume(task_name, command, mount_name):
    _, output, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, task_name, command)
    lines = [line.strip() for line in output.split("\n")]
    log.info("Looking for %s in task mounts.", mount_name)
    for line in lines:
        if mount_name in line:
            return line
    raise Exception(
        "Failed to read host volume mountpoint from {} with command '{}'".format(task_name, command)
    )
Example #28
def read_from_host_volume(task_name, command):
    _, output, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, task_name, command)
    lines = [line.strip() for line in output.split("\n")]
    log.info("Looking for user root under /etc/group in %s", task_name)
    for line in lines:
        if "root" in line:
            return line
    raise Exception(
        "Failed to read host volume mountpoint from {} with command '{}'".format(task_name, command)
    )
Example #29
    def assert_envvar_has_value(envvar: str, expected_value: str):
        _, stdout, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, "hello-0-server", "env")
        env = dict(l.strip().split("=", 1) for l in stdout.strip().split('\n'))
        val = env.get(envvar, "absent")

        if val == "absent":
            raise ConfigException("Required envvar not found")

        if val != expected_value:
            log.error("Looking for %s=%s but found: %s", envvar, expected_value, val)
            raise ConfigException("Envvar not set to required value")

        log.info("%s has expected value %s", envvar, expected_value)
Example #30
def verify_shared_executor(pod_name, expected_files=['essential', 'nonessential'], delete_files=True):
    '''verify that both tasks share the same executor:
    - matching ExecutorInfo
    - both 'essential' and 'nonessential' present in shared-volume/ across both tasks
    '''
    tasks = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info {}'.format(pod_name), json=True)
    assert len(tasks) == 2

    # check that the task executors all match
    executor = tasks[0]['info']['executor']
    for task in tasks[1:]:
        assert executor == task['info']['executor']

    # for each task, check shared volume content matches what's expected
    task_names = [task['info']['name'] for task in tasks]
    for task_name in task_names:
        # 1.9 just uses the host filesystem in 'task exec', so use 'task ls' across the board instead
        filenames = sdk_cmd.run_cli('task ls {} shared-volume/'.format(task_name)).strip().split()
        assert set(expected_files) == set(filenames)

    # delete files from volume in preparation for a following task relaunch
    if delete_files:
        if sdk_utils.dcos_version_less_than("1.10"):
            # 1.9 just uses the host filesystem in 'task exec', so figure out the absolute volume path manually
            expected_file_path = sdk_cmd.service_task_exec(
                config.SERVICE_NAME,
                task_names[0],
                'find /var/lib/mesos/slave/volumes -iname ' + filenames[0])[1].strip()
            # volume dir is parent of the expected file path.
            volume_dir = os.path.dirname(expected_file_path)
        else:
            # 1.10+ works correctly: path is relative to sandbox
            volume_dir = 'shared-volume/'
        sdk_cmd.service_task_exec(
            config.SERVICE_NAME,
            task_names[0],
            'rm ' + ' '.join([os.path.join(volume_dir, name) for name in filenames]))
Example #31
    def run_openssl_command() -> str:
        command = " ".join([
            "timeout",
            openssl_timeout,
            "openssl",
            "s_client",
            "-cipher",
            cipher,
            "-connect",
            endpoint,
        ])

        _, stdout, stderr = sdk_cmd.service_task_exec(service_name, task_name,
                                                      command)
        return stdout + "\n" + stderr
Example #32
    def assert_envvar_has_value(envvar: str, expected_value: str):
        _, stdout, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME,
                                                 "hello-0-server", "env")
        env = dict(l.strip().split("=", 1) for l in stdout.strip().split('\n'))
        val = env.get(envvar, "absent")

        if val == "absent":
            raise ConfigException("Required envvar not found")

        if val != expected_value:
            log.error("Looking for %s=%s but found: %s", envvar,
                      expected_value, val)
            raise ConfigException("Envvar not set to required value")

        log.info("%s has expected value %s", envvar, expected_value)
Example #33
    def run_openssl_command() -> str:
        command = " ".join(
            [
                "timeout",
                openssl_timeout,
                "openssl",
                "s_client",
                "-cipher",
                cipher,
                "-connect",
                endpoint,
            ]
        )

        _, stdout, stderr = sdk_cmd.service_task_exec(service_name, task_name, command)
        return stdout + "\n" + stderr
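The helpers above return the combined s_client output, which a caller such as is_cipher_enabled (not shown here) can inspect. A rough sketch of such a check, based only on openssl s_client's usual success line, follows; the exact output format is an assumption, not the SDK's actual logic:

def output_indicates_cipher(output: str, openssl_cipher: str) -> bool:
    # On a successful handshake, openssl s_client prints a line such as
    # "New, TLSv1.2, Cipher is ECDHE-RSA-AES256-GCM-SHA384".
    return "Cipher is {}".format(openssl_cipher) in output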
Example #34
def test_config_update_across_restart():
    foldered_service_name = config.get_foldered_service_name()

    batch_size_warn_threshold_in_kb = 15
    sdk_upgrade.update_or_upgrade_or_downgrade(
        config.PACKAGE_NAME,
        foldered_service_name,
        to_package_version=None,
        additional_options={
            "service": {"name": foldered_service_name},
            "cassandra": {"batch_size_warn_threshold_in_kb": batch_size_warn_threshold_in_kb},
        },
        expected_running_tasks=config.DEFAULT_TASK_COUNT,
        wait_for_deployment=True,
        timeout_seconds=config.DEFAULT_CASSANDRA_TIMEOUT,
    )

    for _ in range(3):
        cmd_list = ["pod", "restart", "node-0"]
        sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_service_name, " ".join(cmd_list))

        sdk_plan.wait_for_kicked_off_recovery(foldered_service_name)
        sdk_plan.wait_for_completed_recovery(
            foldered_service_name, timeout_seconds=config.DEFAULT_CASSANDRA_TIMEOUT
        )

        _, stdout, _ = sdk_cmd.service_task_exec(foldered_service_name, "node-0-server", "env")

        envvar = "CASSANDRA_BATCH_SIZE_WARN_THRESHOLD_IN_KB="
        envvar_pos = stdout.find(envvar)
        if envvar_pos < 0:
            raise Exception("Required envvar not found")

        if not stdout[envvar_pos + len(envvar) :].startswith(
            "{}".format(batch_size_warn_threshold_in_kb)
        ):
            found_string = stdout[envvar_pos + len(envvar) : envvar_pos + len(envvar) + 15]
            log.error(
                "Looking for %s%d but found: %s",
                envvar,
                batch_size_warn_threshold_in_kb,
                found_string,
            )
            raise Exception("Envvar not set to required value")
Example #35
def test_java_truststore():
    """
    Make an HTTP request from CLI to nginx exposed service.
    Test that CLI reads and uses truststore to verify HTTPS connection.
    """
    # Make an http request from a CLI app using configured keystore to the
    # service itself exposed via VIP.
    # This will test whether the service is serving correct end-entity
    # certificate from keystore and if CLI client can verify certificate
    # with custom truststore configuration.
    command = _java_command(
        'java -jar ' + KEYSTORE_APP_JAR_NAME + ' truststoretest '
        'integration-test.yml '
        'https://' + sdk_hosts.vip_host(config.SERVICE_NAME, NGINX_TASK_HTTPS_PORT_NAME))
    _, output, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, 'keystore-0-webserver', command)
    # Unfortunately `dcos task exec` doesn't respect the return code of the
    # executed command in the container, so we need to manually assert on the
    # expected output.
    assert 'status=200' in output
Example #36
def test_tls_nginx():
    """
    Checks that NGINX exposes TLS service with correct PEM encoded end-entity
    certificate.
    """

    # Use the keystore-app `truststoretest` CLI command to run a request against
    # the NGINX container, verifying that nginx presents itself with an end-entity
    # certificate that can be verified with the truststore.
    command = _java_command(
        'java -jar ' + KEYSTORE_APP_JAR_NAME + ' truststoretest '
        'integration-test.yml '
        'https://' + sdk_hosts.vip_host(config.SERVICE_NAME, NGINX_TASK_HTTPS_PORT_NAME) + '/')
    _, output, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, 'keystore-0-webserver', command)

    # Unfortunately `dcos task exec` doesn't respect the return code of the
    # executed command in the container, so we need to manually assert on the
    # expected output.
    assert 'status=200' in output
Example #37
def test_tls_nginx():
    """
    Checks that NGINX exposes TLS service with correct PEM encoded end-entity
    certificate.
    """

    # Use the keystore-app `truststoretest` CLI command to run a request against
    # the NGINX container, verifying that nginx presents itself with an end-entity
    # certificate that can be verified with the truststore.
    command = _java_command(
        "java -jar " + KEYSTORE_APP_JAR_NAME + " truststoretest "
        "integration-test.yml "
        "https://" + sdk_hosts.vip_host(config.SERVICE_NAME, NGINX_TASK_HTTPS_PORT_NAME) + "/"
    )
    _, output, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, "keystore-0-webserver", command)

    # Unfortunately `dcos task exec` doesn't respect the return code of the
    # executed command in the container, so we need to manually assert on the
    # expected output.
    assert "status=200" in output
Example #38
def test_java_truststore():
    """
    Make an HTTP request from CLI to nginx exposed service.
    Test that CLI reads and uses truststore to verify HTTPS connection.
    """
    # Make an http request from a CLI app using configured keystore to the
    # service itself exposed via VIP.
    # This will test whether the service is serving correct end-entity
    # certificate from keystore and if CLI client can verify certificate
    # with custom truststore configuration.
    command = _java_command(
        "java -jar " + KEYSTORE_APP_JAR_NAME + " truststoretest "
        "integration-test.yml "
        "https://" + sdk_hosts.vip_host(config.SERVICE_NAME, NGINX_TASK_HTTPS_PORT_NAME)
    )
    _, output, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, "keystore-0-webserver", command)
    # Unfortunately `dcos task exec` doesn't respect the return code of the
    # executed command in the container, so we need to manually assert on the
    # expected output.
    assert "status=200" in output
Example #39
def wait_for_toxic_sidecar():
    """
    Since the sidecar task fails too quickly, we check for the contents of
    the file generated in hello-container-path/toxic-output instead

    Note that we only check the output of hello-0.

    In DC/OS prior to version 1.10, task exec does not run the command in the MESOS_SANDBOX directory and this
    causes the check of the file contents to fail. Here we simply rely on the existence of the file.
    """
    if sdk_utils.dcos_version_less_than("1.10"):
        # Note: As of this writing, 'task ls' does 'contains' comparisons of task ids correctly,
        # so we don't need to include a service name prefix here.
        _, output, _ = sdk_cmd.run_cli("task ls hello-0-server hello-container-path/toxic-output")
        expected_output = ""
    else:
        _, output, _ = sdk_cmd.service_task_exec(
            config.SERVICE_NAME, "hello-0-server", "cat hello-container-path/toxic-output"
        )
        expected_output = "I'm addicted to you / Don't you know that you're toxic?"
    return output.strip() == expected_output
Example #40
def wait_for_toxic_sidecar():
    """
    Since the sidecar task fails too quickly, we check for the contents of
    the file generated in hello-container-path/toxic-output instead

    Note that we only check the output of hello-0.

    In DC/OS prior to version 1.10, task exec does not run the command in the MESOS_SANDBOX directory and this
    causes the check of the file contents to fail. Here we simply rely on the existence of the file.
    """
    if sdk_utils.dcos_version_less_than("1.10"):
        # Note: As of this writing, 'task ls' does 'contains' comparisons of task ids correctly,
        # so we don't need to include a service name prefix here.
        _, output, _ = sdk_cmd.run_cli("task ls hello-0-server hello-container-path/toxic-output")
        expected_output = ""
    else:
        _, output, _ = sdk_cmd.service_task_exec(
            config.SERVICE_NAME, "hello-0-server", "cat hello-container-path/toxic-output"
        )
        expected_output = "I'm addicted to you / Don't you know that you're toxic?"
    return output.strip() == expected_output
Example #41
def _curl_query(service_name,
                method,
                endpoint,
                json_data=None,
                role="master",
                https=False,
                return_json=True):
    protocol = "https" if https else "http"
    host = sdk_hosts.autoip_host(service_name, "{}-0-node".format(role),
                                 _master_zero_http_port(service_name))
    curl_cmd = "/opt/mesosphere/bin/curl -sS -u elastic:changeme -X{} '{}://{}/{}'".format(
        method, protocol, host, endpoint)
    if json_data:
        curl_cmd += " -H 'Content-type: application/json' -d '{}'".format(
            json.dumps(json_data))
    task_name = "master-0-node"
    exit_code, stdout, stderr = sdk_cmd.service_task_exec(
        service_name, task_name, curl_cmd)

    def build_errmsg(msg):
        return "{}\nCommand:\n{}\nstdout:\n{}\nstderr:\n{}".format(
            msg, curl_cmd, stdout, stderr)

    if exit_code:
        log.warning(
            build_errmsg(
                "Failed to run command on {}, retrying or giving up.".format(
                    task_name)))
        return None

    if not return_json:
        return stdout

    try:
        return json.loads(stdout)
    except Exception:
        log.warning(
            build_errmsg(
                "Failed to parse stdout as JSON, retrying or giving up."))
        return None
Example #42
def setup_passwords(
    service_name: str = SERVICE_NAME, task_name: str = "master-0-node", https: bool = False
) -> Union[bool, Dict[str, str]]:
    if https:
        master_0_node_dns = sdk_networks.get_endpoint(PACKAGE_NAME, service_name, "master-http")[
            "dns"
        ][0]
        url = "--url https://{}".format(master_0_node_dns)
    else:
        url = ""

    cmd = "\n".join(
        [
            "set -x",
            "export JAVA_HOME=$(ls -d ${MESOS_SANDBOX}/jdk*/jre/)",
            "ELASTICSEARCH_PATH=$(ls -d ${MESOS_SANDBOX}/elasticsearch-*/)",
            "${{ELASTICSEARCH_PATH}}/bin/elasticsearch-setup-passwords auto --batch --verbose {}".format(
                url
            ),
        ]
    )

    full_cmd = "bash -c '{}'".format(cmd)
    _, stdout, _ = sdk_cmd.service_task_exec(service_name, task_name, full_cmd)

    elastic_password_search = re.search("PASSWORD elastic = (.*)", stdout)
    assert isinstance(elastic_password_search, Match)
    elastic_password = elastic_password_search.group(1)

    kibana_password_search = re.search("PASSWORD kibana = (.*)", stdout)
    assert isinstance(kibana_password_search, Match)
    kibana_password = kibana_password_search.group(1)

    if not elastic_password or not kibana_password:
        # Retry.
        return False

    return {"elastic": elastic_password, "kibana": kibana_password}
Example #43
def _curl_query(service_name, method, endpoint, json_data=None, role="master", https=False, return_json=True):
    protocol = 'https' if https else 'http'
    host = sdk_hosts.autoip_host(service_name, "{}-0-node".format(role), _master_zero_http_port(service_name))
    curl_cmd = "/opt/mesosphere/bin/curl -sS -u elastic:changeme -X{} '{}://{}/{}'".format(method, protocol, host, endpoint)
    if json_data:
        curl_cmd += " -H 'Content-type: application/json' -d '{}'".format(json.dumps(json_data))
    task_name = "master-0-node"
    exit_code, stdout, stderr = sdk_cmd.service_task_exec(service_name, task_name, curl_cmd)

    def build_errmsg(msg):
        return "{}\nCommand:\n{}\nstdout:\n{}\nstderr:\n{}".format(msg, curl_cmd, stdout, stderr)

    if exit_code:
        log.warning(build_errmsg("Failed to run command on {}, retrying or giving up.".format(task_name)))
        return None

    if not return_json:
        return stdout

    try:
        return json.loads(stdout)
    except Exception:
        log.warning(build_errmsg("Failed to parse stdout as JSON, retrying or giving up."))
        return None
Example #44
def cmd(service_name: str, task_name: str, command: str) -> Tuple[int, str, str]:
    return sdk_cmd.service_task_exec(
        service_name,
        task_name,
        "bash -c 'JAVA_HOME=$(ls -d jdk*/jre/) apache-cassandra-*/bin/nodetool {}'".format(command),
    )
Example #45
def test_xmx_and_xms_flags(configure_security):
    """ method to test the duplication of JVM flags in elastic tasks """

    # setting custom values for the heap of various pods
    MASTER_NODE_HEAP = 700
    DATA_NODE_HEAP = 800
    COORDINATOR_NODE_HEAP = 900
    INGEST_NODE_HEAP = 1000

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    # Install the Elastic service, passing customized options to override the default values.
    sdk_install.install(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        config.DEFAULT_TASK_COUNT,
        {
            "master_nodes": {"heap": {"size": MASTER_NODE_HEAP}},
            "data_nodes": {"heap": {"size": DATA_NODE_HEAP}},
            "coordinator_nodes": {"heap": {"size": COORDINATOR_NODE_HEAP}},
            "ingest_nodes": {"heap": {"size": INGEST_NODE_HEAP}},
        },
    )
    # Get all the tasks and check for duplicated heap flags by inspecting each task's process list.
    for task in sdk_tasks.get_task_ids(config.SERVICE_NAME):
        cmd = "ps aux"
        flag_xms = "Xms"
        flag_xmx = "Xmx"
        exit_code, stdout, stderr = sdk_cmd.service_task_exec(config.SERVICE_NAME, task, cmd)

        assert str(stdout).count(flag_xms) == 1, "Default xms flag prefix should appear once"

        assert str(stdout).count(flag_xmx) == 1, "Default xmx flag prefix should appear once"

        if str(task).count("master"):
            master_xms = flag_xms + str(MASTER_NODE_HEAP)
            master_xmx = flag_xmx + str(MASTER_NODE_HEAP)
            log.info("Checking flags in master node: " + task)
            assert (
                str(stdout).count(master_xms) == 1
            ), "Configured master node xms flag prefix should appear once"
            assert (
                str(stdout).count(master_xmx) == 1
            ), "Configured master node xmx flag prefix should appear once"

        if str(task).count("data"):
            data_xms = flag_xms + str(DATA_NODE_HEAP)
            data_xmx = flag_xmx + str(DATA_NODE_HEAP)
            log.info("Checking flags in data node: " + task)
            assert (
                str(stdout).count(data_xms) == 1
            ), "Configured data node xms flag prefix should appear once"
            assert (
                str(stdout).count(data_xmx) == 1
            ), "Configured data node xmx flag prefix should appear once"

        if str(task).count("coordinator"):
            coordinator_xms = flag_xms + str(COORDINATOR_NODE_HEAP)
            coordinator_xmx = flag_xmx + str(COORDINATOR_NODE_HEAP)
            log.info("Checking flags in coordinator node: " + task)
            assert (
                str(stdout).count(coordinator_xms) == 1
            ), "Configured coordinator node xms flag prefix should appear once"
            assert (
                str(stdout).count(coordinator_xmx) == 1
            ), "Configured coordinator node xmx flag prefix should appear once"

        if str(task).count("ingest"):
            ingest_xms = flag_xms + str(INGEST_NODE_HEAP)
            ingest_xmx = flag_xmx + str(INGEST_NODE_HEAP)
            log.info("Checking flags in ingest node: " + task)
            assert (
                str(stdout).count(ingest_xms) == 1
            ), "Configured ingest node flag xms prefix should appear once"
            assert (
                str(stdout).count(ingest_xmx) == 1
            ), "Configured ingest node flag xmx prefix should appear once"

    # uninstalling the installed service
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
Example #46
def test_tmp_directory_created():
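    # Writing to the absolute path /tmp/bar is expected not to show up at the
    # sandbox-relative path tmp/bar, so the grep finds nothing and the command
    # chain exits non-zero, which is what the assertion below checks.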
    code, stdout, stderr = sdk_cmd.service_task_exec(
        config.SERVICE_NAME, "hello-0-server", "echo bar > /tmp/bar && cat tmp/bar | grep bar"
    )
    assert code > 0
Example #47
def test_tmp_directory_created():
    code, stdout, stderr = sdk_cmd.service_task_exec(
        config.SERVICE_NAME, "hello-0-server",
        "echo bar > /tmp/bar && cat tmp/bar | grep bar")
    assert code > 0
Example #48
def cmd(service_name, task_name, command):
    return sdk_cmd.service_task_exec(service_name, task_name,
        "bash -c 'JAVA_HOME=$(ls -d jdk*/jre/) apache-cassandra-*/bin/nodetool {}'".format(command))