def kafka_client(kerberos):
    """
    A pytest fixture to install a Kafka client as a Marathon application.
    This client is capable of both Kerberos and TLS communication.

    On teardown, the client is uninstalled.
    """
    client_id = "kafka-client"
    try:
        # Marathon app definition: keytab mounted as a secret volume,
        # running on the host network.
        app = {
            "id": client_id,
            "mem": 512,
            "container": {
                "type": "MESOS",
                "docker": {
                    "image": "elezar/kafka-client:latest",
                    "forcePullImage": True
                },
                "volumes": [{
                    "containerPath": "/tmp/kafkaconfig/kafka-client.keytab",
                    "secret": "kafka_keytab"
                }]
            },
            "secrets": {
                "kafka_keytab": {
                    "source": kerberos.get_keytab_path(),
                }
            },
            "networks": [{"mode": "host"}],
            "env": {
                "JVM_MaxHeapSize": "512",
                "KAFKA_CLIENT_MODE": "test",
                "KAFKA_TOPIC": "securetest",
                "KAFKA_BROKER_LIST": ""
            }
        }

        sdk_marathon.install_app(app)

        # Create a TLS certificate for the TLS tests
        transport_encryption.create_tls_artifacts(
            cn="client",
            marathon_task=client_id)

        yield {**app, "tls-id": "client"}
    finally:
        sdk_marathon.destroy_app(client_id)
def install(self) -> Dict[str, Any]:
    """
    Install the KDC Marathon app and return its task description.

    An already-installed app is reused when persistence is enabled,
    otherwise it is destroyed and reinstalled. A service account with
    secret create/update permissions is (re-)created before install.
    """
    app_id = self.app_definition["id"]
    if sdk_marathon.app_exists(app_id):
        if self._persist:
            log.info("Found installed KDC app, reusing it")
            return _get_kdc_task(app_id)
        log.info("Found installed KDC app, destroying it first")
        sdk_marathon.destroy_app(app_id)

    # (re-)create a service account for the KDC service
    sdk_security.create_service_account(
        service_account_name=KDC_SERVICE_ACCOUNT,
        service_account_secret=KDC_SERVICE_ACCOUNT_SECRET,
    )
    # Grant the account create and update rights on all root-path secrets.
    for description, action in (
        ("Create any secret in the root path", "create"),
        ("Update any secret in the root path", "update"),
    ):
        sdk_security._grant(
            KDC_SERVICE_ACCOUNT,
            "dcos:secrets:default:%252F*",
            description,
            action,
        )

    log.info("Installing KDC Marathon app")
    sdk_marathon.install_app(self.app_definition)
    log.info("KDC app installed successfully")

    log.info("Waiting for KDC web API endpoint to become available")
    self.__wait_for_kdc_api()
    log.info("KDC web API is now available")

    return _get_kdc_task(app_id)
def test_user_can_write_and_read(kerberos):
    """
    Verify a kerberized HDFS client can write a file and read it back.

    Fix: the original assigned ``client_task_id`` only after
    ``install_app`` inside the try block, so any earlier failure raised a
    NameError in the ``finally`` clause, masking the real error. The app
    definition (and its id) is now prepared before entering the try.
    """
    client_app_def = config.get_kerberized_hdfs_client_app()
    client_app_def["secrets"]["hdfs_keytab"]["source"] = kerberos.get_keytab_path()
    client_app_def["env"]["REALM"] = kerberos.get_realm()
    client_app_def["env"]["KDC_ADDRESS"] = kerberos.get_kdc_address()
    client_task_id = client_app_def["id"]

    try:
        sdk_marathon.install_app(client_app_def)

        # Authenticate as the generic HDFS user before issuing commands.
        sdk_auth.kinit(client_task_id,
                       keytab=config.KEYTAB,
                       principal=config.GENERIC_HDFS_USER_PRINCIPAL)

        write_cmd = "/bin/bash -c '{}'".format(
            config.hdfs_write_command(config.TEST_FILE_1_NAME,
                                      config.TEST_CONTENT_SMALL))
        sdk_tasks.task_exec(client_task_id, write_cmd)

        read_cmd = "/bin/bash -c '{}'".format(
            config.hdfs_read_command(config.TEST_FILE_1_NAME))
        _, stdout, _ = sdk_tasks.task_exec(client_task_id, read_cmd)
        assert stdout == config.TEST_CONTENT_SMALL
    finally:
        sdk_marathon.destroy_app(client_task_id)
def install(self, kerberos: sdk_auth.KerberosEnvironment = None) -> dict:
    """
    Install the Kafka client as a Marathon app, optionally kerberized.

    Returns the Marathon app definition that was installed.
    """
    app = {
        "id": self.id,
        "mem": 512,
        "container": {
            "type": "MESOS",
            "docker": {
                "image": "elezar/kafka-client:deca3d0",
                "forcePullImage": True
            },
        },
        "networks": [{"mode": "host"}],
        "env": {
            "JVM_MaxHeapSize": "512",
            "KAFKA_CLIENT_MODE": "test",
            "KAFKA_TOPIC": "securetest",
        },
    }

    if kerberos is not None:
        self._is_kerberos = True
        # Overlay the Kerberos-specific settings onto the base definition.
        app = sdk_utils.merge_dictionaries(
            app, self._get_kerberos_options(kerberos))

    sdk_marathon.install_app(app)
    return app
def __init__(self):
    """
    Installs the Kerberos Domain Controller (KDC) as the initial step in
    creating a kerberized cluster. This just passes a dictionary to be
    rendered as a JSON app definition to marathon.
    """
    self.temp_working_dir = _create_temp_working_dir()

    # Load the KDC app definition shipped alongside this module.
    kdc_app_def_path = "{current_file_dir}/../tools/kdc.json".format(
        current_file_dir=os.path.dirname(os.path.realpath(__file__)))
    with open(kdc_app_def_path) as f:
        kdc_app_def = json.load(f)
    kdc_app_def["id"] = KERBEROS_APP_ID
    sdk_marathon.install_app(kdc_app_def)

    # Addressing details of the running KDC app.
    self.kdc_port = int(kdc_app_def["portDefinitions"][0]["port"])
    self.kdc_host = "{app_name}.{service_name}.{autoip_host_suffix}".format(
        app_name=KERBEROS_APP_ID,
        service_name="marathon",
        autoip_host_suffix=sdk_hosts.AUTOIP_HOST_SUFFIX)
    self.kdc_realm = REALM

    self.kdc_task = _get_kdc_task()
    self.framework_id = self.kdc_task["framework_id"]
    self.task_id = self.kdc_task["id"]
    self.kdc_host_id = self.kdc_task["slave_id"]
    self.kdc_host_name = _get_host_name(self.kdc_host_id)
    self.master_public_ip = _get_master_public_ip()

    self.principals = []
    self.keytab_file_name = KERBEROS_KEYTAB_FILE_NAME
    self.base64_encoded_keytab_file_name = BASE64_ENCODED_KEYTAB_FILE_NAME.format(
        keytab_name=self.keytab_file_name)

    # The enterprise CLI is needed for secret creation/deletion.
    cmd = "package install --yes --cli dcos-enterprise-cli"
    try:
        sdk_cmd.run_cli(cmd)
    except dcos.errors.DCOSException as e:
        raise RuntimeError("Failed to install the dcos-enterprise-cli: {}".format(repr(e)))
def hdfs_client(kerberos, hdfs_server):
    """
    A pytest fixture installing a kerberized HDFS client Marathon app.

    Yields the app definition; the app is uninstalled on teardown.

    Fix: the original built ``client`` as the first statement inside the
    try block, so a failure in ``get_hdfs_client_app`` raised a NameError
    in the ``finally`` clause, masking the real error. The definition is
    now built before entering the try.
    """
    client = config.get_hdfs_client_app(hdfs_server["service"]["name"], kerberos)
    try:
        sdk_marathon.install_app(client)
        # Provide the client task with the krb5 configuration for the KDC.
        krb5.write_krb5_config_file(client["id"], "/etc/krb5.conf", kerberos)
        yield client
    finally:
        sdk_marathon.destroy_app(client["id"])
def hdfs_client(hdfs_service):
    """
    A pytest fixture installing an HDFS client Marathon app.

    Yields the app definition; the app is uninstalled on teardown.

    Fix: the app definition is built before entering the try block so
    that a failure in ``get_hdfs_client_app`` cannot trigger a NameError
    on ``client`` in the ``finally`` clause.
    """
    client = config.get_hdfs_client_app(hdfs_service["service"]["name"])
    try:
        sdk_marathon.install_app(client)
        yield client
    finally:
        sdk_marathon.destroy_app(client["id"])
def hdfs_client():
    """
    A pytest fixture installing an HDFS client Marathon app.

    Yields the app definition; the app is uninstalled on teardown.

    Fix: the app definition is built before entering the try block so
    that a failure in ``get_hdfs_client_app`` cannot trigger a NameError
    on ``client`` in the ``finally`` clause.
    """
    client = config.get_hdfs_client_app(config.SERVICE_NAME)
    try:
        sdk_marathon.install_app(client)
        yield client
    finally:
        sdk_marathon.destroy_app(client["id"])
def kafka_client(kerberos, kafka_server):
    """
    A pytest fixture installing a kerberized Kafka client Marathon app.

    Yields the app definition plus the broker host list; the app is
    uninstalled on teardown.

    Fix: ``transport_encryption.create_tls_artifacts`` is called with
    ``marathon_task=`` at every other call site; this one passed ``task=``,
    which does not match that keyword and would raise a TypeError.
    """
    brokers = sdk_cmd.svc_cli(
        kafka_server["package_name"],
        kafka_server["service"]["name"],
        "endpoint broker-tls",
        json=True)["dns"]

    try:
        client_id = "kafka-client"
        client = {
            "id": client_id,
            "mem": 512,
            "user": "******",
            "container": {
                "type": "MESOS",
                "docker": {
                    "image": "elezar/kafka-client:latest",
                    "forcePullImage": True
                },
                "volumes": [
                    {
                        "containerPath": "/tmp/kafkaconfig/kafka-client.keytab",
                        "secret": "kafka_keytab"
                    }
                ]
            },
            "secrets": {
                "kafka_keytab": {
                    "source": kerberos.get_keytab_path(),
                }
            },
            "networks": [
                {
                    "mode": "host"
                }
            ],
            "env": {
                "JVM_MaxHeapSize": "512",
                "KAFKA_CLIENT_MODE": "test",
                "KAFKA_TOPIC": "securetest",
                "KAFKA_BROKER_LIST": ",".join(brokers)
            }
        }

        sdk_marathon.install_app(client)

        transport_encryption.create_tls_artifacts(
            cn="client",
            marathon_task=client_id)

        broker_hosts = list(map(lambda x: x.split(':')[0], brokers))
        yield {**client, **{"brokers": broker_hosts}}
    finally:
        sdk_marathon.destroy_app(client_id)
def kafka_client(kerberos, kafka_server):
    """
    A pytest fixture installing a kerberized Kafka client Marathon app.

    Yields the app definition plus the list of broker hostnames; the app
    is uninstalled on teardown.
    """
    brokers = sdk_cmd.svc_cli(
        kafka_server["package_name"],
        kafka_server["service"]["name"],
        "endpoint broker-tls",
        json=True)["dns"]

    client_id = "kafka-client"
    try:
        app = {
            "id": client_id,
            "mem": 512,
            "user": "******",
            "container": {
                "type": "MESOS",
                "docker": {
                    "image": "elezar/kafka-client:latest",
                    "forcePullImage": True
                },
                "volumes": [{
                    "containerPath": "/tmp/kafkaconfig/kafka-client.keytab",
                    "secret": "kafka_keytab"
                }]
            },
            "secrets": {
                "kafka_keytab": {
                    "source": kerberos.get_keytab_path(),
                }
            },
            "networks": [{"mode": "host"}],
            "env": {
                "JVM_MaxHeapSize": "512",
                "KAFKA_CLIENT_MODE": "test",
                "KAFKA_TOPIC": "securetest",
                "KAFKA_BROKER_LIST": ",".join(brokers)
            }
        }

        sdk_marathon.install_app(app)

        # Provision TLS artifacts inside the client's task sandbox.
        transport_encryption.create_tls_artifacts(
            cn="client",
            marathon_task=client_id)

        yield {**app, "brokers": [b.split(':')[0] for b in brokers]}
    finally:
        sdk_marathon.destroy_app(client_id)
def kafka_client(kerberos):
    """
    A pytest fixture to install a Kafka client as a Marathon application.
    This client is capable of both Kerberos and TLS communication.

    On teardown, the client is uninstalled.
    """
    client_id = "kafka-client"
    try:
        app = {
            "id": client_id,
            "mem": 512,
            "container": {
                "type": "MESOS",
                "docker": {
                    "image": "elezar/kafka-client:4b9c060",
                    "forcePullImage": True
                },
                "volumes": [{
                    "containerPath": "/tmp/kafkaconfig/kafka-client.keytab",
                    "secret": "kafka_keytab"
                }]
            },
            "secrets": {
                "kafka_keytab": {
                    "source": kerberos.get_keytab_path(),
                }
            },
            "networks": [{"mode": "host"}],
            "env": {
                "JVM_MaxHeapSize": "512",
                "KAFKA_CLIENT_MODE": "test",
                "KAFKA_TOPIC": "securetest",
                "KAFKA_BROKER_LIST": ""
            }
        }

        sdk_marathon.install_app(app)

        # Create a TLS certificate for the TLS tests
        transport_encryption.create_tls_artifacts(cn="client",
                                                  marathon_task=client_id)

        yield {**app, "tls-id": "client"}
    finally:
        sdk_marathon.destroy_app(client_id)
def hdfs_client(kerberos, hdfs_server):
    """
    A pytest fixture installing a kerberized HDFS client Marathon app
    against the foldered HDFS service.

    Yields the app definition; the app is uninstalled on teardown.
    """
    client_id = "hdfs-client"
    try:
        keytab_container_path = "/{}/hdfs.keytab".format(config.HADOOP_VERSION)
        app = {
            "id": client_id,
            "mem": 1024,
            "user": "******",
            "container": {
                "type": "MESOS",
                "docker": {
                    "image": "nvaziri/hdfs-client:stable",
                    "forcePullImage": True
                },
                "volumes": [{
                    "containerPath": keytab_container_path,
                    "secret": "hdfs_keytab"
                }]
            },
            "secrets": {
                "hdfs_keytab": {
                    "source": kerberos.get_keytab_path()
                }
            },
            "networks": [{"mode": "host"}],
            "env": {
                "REALM": kerberos.get_realm(),
                "KDC_ADDRESS": kerberos.get_kdc_address(),
                "JAVA_HOME": "/usr/lib/jvm/default-java",
                "KRB5_CONFIG": "/etc/krb5.conf",
                # NOTE(review): relies on the private sdk_hosts._safe_name
                # helper to flatten the foldered service name.
                "HDFS_SERVICE_NAME": sdk_hosts._safe_name(config.FOLDERED_SERVICE_NAME),
                "HADOOP_VERSION": config.HADOOP_VERSION
            }
        }

        sdk_marathon.install_app(app)
        krb5.write_krb5_config_file(client_id, "/etc/krb5.conf", kerberos)

        yield app
    finally:
        sdk_marathon.destroy_app(client_id)
def kerberized_hdfs_client(kerberos):
    """
    A pytest fixture installing a kerberized HDFS client Marathon app.

    Yields the app id; the app is uninstalled on teardown.

    Fix: the app definition is built before entering the try block. In
    the original, a failure in ``get_kerberized_hdfs_client_app`` raised
    a NameError on ``client_app_def`` in the ``finally`` clause, masking
    the real error.
    """
    client_app_def = config.get_kerberized_hdfs_client_app()
    client_app_def["secrets"]["hdfs_keytab"]["source"] = kerberos.get_keytab_path()
    client_app_def["env"]["REALM"] = kerberos.get_realm()
    client_app_def["env"]["KDC_ADDRESS"] = kerberos.get_kdc_address()
    try:
        sdk_marathon.install_app(client_app_def)
        yield client_app_def["id"]
    finally:
        sdk_marathon.destroy_app(client_app_def["id"])
def install(self) -> dict:
    """
    Install the KDC Marathon app and return its task description.

    An already-installed app is reused when persistence is enabled,
    otherwise it is destroyed and reinstalled.
    """
    app_id = self.app_definition["id"]
    if sdk_marathon.app_exists(app_id):
        if self._persist:
            log.info("Found installed KDC app, reusing it")
            return _get_kdc_task(app_id)
        log.info("Found installed KDC app, destroying it first")
        sdk_marathon.destroy_app(app_id)

    log.info("Installing KDC Marathon app")
    sdk_marathon.install_app(self.app_definition)
    log.info("KDC app installed successfully")
    return _get_kdc_task(app_id)
def kafka_client(kerberos, kafka_server):
    """
    A pytest fixture installing a kerberized Kafka client Marathon app.

    Yields the app definition plus the broker hostnames; the app is
    uninstalled on teardown.
    """
    brokers = sdk_cmd.svc_cli(kafka_server["package_name"],
                              kafka_server["service"]["name"],
                              "endpoint broker",
                              parse_json=True)[1]["dns"]

    client_id = "kafka-client"
    try:
        app = {
            "id": client_id,
            "mem": 512,
            "container": {
                "type": "MESOS",
                "docker": {
                    "image": "elezar/kafka-client:4b9c060",
                    "forcePullImage": True
                },
                "volumes": [{
                    "containerPath": "/tmp/kafkaconfig/kafka-client.keytab",
                    "secret": "kafka_keytab",
                }],
            },
            "secrets": {
                "kafka_keytab": {
                    "source": kerberos.get_keytab_path()
                }
            },
            "networks": [{"mode": "host"}],
            "env": {
                "JVM_MaxHeapSize": "512",
                "KAFKA_CLIENT_MODE": "test",
                "KAFKA_TOPIC": "securetest",
                "KAFKA_BROKER_LIST": ",".join(brokers),
            },
        }

        sdk_marathon.install_app(app)

        yield {**app, "brokers": [b.split(":")[0] for b in brokers]}
    finally:
        sdk_marathon.destroy_app(client_id)
def hdfs_client(kerberos, hdfs_server):
    """
    A pytest fixture installing a kerberized HDFS client Marathon app.

    Yields the app definition plus the DC/OS CA bundle fetched from the
    client task; the app is uninstalled on teardown.
    """
    client_id = "hdfs-client"
    try:
        app = {
            "id": client_id,
            "mem": 1024,
            "user": "******",
            "container": {
                "type": "MESOS",
                "docker": {
                    "image": "elezar/hdfs-client:dev",
                    "forcePullImage": True
                },
                "volumes": [{
                    "containerPath": "/hadoop-2.6.0-cdh5.9.1/hdfs.keytab",
                    "secret": "hdfs_keytab"
                }]
            },
            "secrets": {
                "hdfs_keytab": {
                    "source": kerberos.get_keytab_path()
                }
            },
            "networks": [{"mode": "host"}],
            "env": {
                "REALM": kerberos.get_realm(),
                "KDC_ADDRESS": kerberos.get_kdc_address(),
                "JAVA_HOME": "/usr/lib/jvm/default-java",
                "KRB5_CONFIG": "/etc/krb5.conf",
                "HDFS_SERVICE_NAME": config.SERVICE_NAME,
                "HADOOP_VERSION": config.HADOOP_VERSION
            }
        }

        sdk_marathon.install_app(app)
        krb5.write_krb5_config_file(client_id, "/etc/krb5.conf", kerberos)
        dcos_ca_bundle = transport_encryption.fetch_dcos_ca_bundle(client_id)

        yield {**app, "dcos_ca_bundle": dcos_ca_bundle}
    finally:
        sdk_marathon.destroy_app(client_id)
def hdfs_client(kerberos, hdfs_server):
    """
    A pytest fixture installing a kerberized HDFS client Marathon app.

    Yields the app definition plus the DC/OS CA bundle fetched from the
    client task; the app is uninstalled on teardown.
    """
    client_id = "hdfs-client"
    try:
        app = {
            "id": client_id,
            "mem": 1024,
            "user": "******",
            "container": {
                "type": "MESOS",
                "docker": {
                    "image": "elezar/hdfs-client:dev",
                    "forcePullImage": True
                },
                "volumes": [{
                    "containerPath": "/hadoop-2.6.0-cdh5.9.1/hdfs.keytab",
                    "secret": "hdfs_keytab"
                }]
            },
            "secrets": {
                "hdfs_keytab": {
                    "source": kerberos.get_keytab_path()
                }
            },
            "networks": [{"mode": "host"}],
            "env": {
                "REALM": kerberos.get_realm(),
                "KDC_ADDRESS": kerberos.get_kdc_address(),
                "JAVA_HOME": "/usr/lib/jvm/default-java",
                "KRB5_CONFIG": "/etc/krb5.conf",
                "HDFS_SERVICE_NAME": config.SERVICE_NAME,
            }
        }

        sdk_marathon.install_app(app)
        krb5.write_krb5_config_file(client_id, "/etc/krb5.conf", kerberos)
        dcos_ca_bundle = transport_encryption.fetch_dcos_ca_bundle(client_id)

        yield {**app, "dcos_ca_bundle": dcos_ca_bundle}
    finally:
        sdk_marathon.destroy_app(client_id)
def setup_hdfs_client(hdfs_with_kerberos):
    """
    A pytest fixture installing an HDFS client Marathon app and running
    kinit inside it so subsequent test commands are authenticated.

    The app is uninstalled on teardown.
    """
    try:
        # Load the client app definition bundled with the tests.
        resource_dir = os.path.dirname(os.path.realpath(__file__))
        with open("{}/resources/hdfsclient.json".format(resource_dir)) as f:
            app_def = json.load(f)
        app_def["id"] = HDFS_CLIENT_ID
        app_def["secrets"]["hdfs_keytab"]["source"] = KEYTAB_SECRET_PATH
        sdk_marathon.install_app(app_def)

        sdk_auth.kinit(HDFS_CLIENT_ID,
                       keytab="hdfs.keytab",
                       principal=GENERIC_HDFS_USER_PRINCIPAL)
        yield
    finally:
        sdk_marathon.destroy_app(HDFS_CLIENT_ID)
def kafka_client():
    """
    A pytest fixture installing a Kafka client Marathon app pointed at
    the three well-known broker endpoints.

    Yields the app definition plus the broker hostnames; the app is
    uninstalled on teardown.
    """
    brokers = [
        "kafka-{}-broker.{}.autoip.dcos.thisdcos.directory:1030".format(
            index, config.SERVICE_NAME)
        for index in range(3)
    ]

    client_id = "kafka-client"
    try:
        app = {
            "id": client_id,
            "mem": 512,
            "user": "******",
            "container": {
                "type": "MESOS",
                "docker": {
                    "image": "elezar/kafka-client:latest",
                    "forcePullImage": True
                },
            },
            "networks": [{"mode": "host"}],
            "env": {
                "JVM_MaxHeapSize": "512",
                "KAFKA_CLIENT_MODE": "test",
                "KAFKA_BROKER_LIST": ",".join(brokers),
                "KAFKA_OPTS": ""
            }
        }
        sdk_marathon.install_app(app)

        yield {**app, "brokers": [b.split(':')[0] for b in brokers]}
    finally:
        sdk_marathon.destroy_app(client_id)
def kafka_client(kerberos):
    """
    A pytest fixture installing a kerberized Kafka client Marathon app.

    Yields the app id; the app is uninstalled on teardown.
    """
    client_id = "kafka-client"
    try:
        app = {
            "id": client_id,
            "mem": 512,
            "user": "******",
            "container": {
                "type": "MESOS",
                "docker": {
                    "image": "elezar/kafka-client:latest",
                    "forcePullImage": True
                },
                "volumes": [{
                    "containerPath": "/tmp/kafkaconfig/kafka-client.keytab",
                    "secret": "kafka_keytab"
                }]
            },
            "secrets": {
                "kafka_keytab": {
                    "source": kerberos.get_keytab_path(),
                }
            },
            "networks": [{"mode": "host"}],
            "env": {
                "JVM_MaxHeapSize": "512",
                "KAFKA_CLIENT_MODE": "test",
                "KAFKA_SERVICE_NAME": config.SERVICE_NAME
            }
        }
        sdk_marathon.install_app(app)
        yield app["id"]
    finally:
        sdk_marathon.destroy_app(client_id)
def kafka_client():
    """
    A pytest fixture installing a Kafka client Marathon app pointed at
    the three well-known broker endpoints.

    Yields the app definition plus the broker hostnames; the app is
    uninstalled on teardown.
    """
    brokers = [
        "kafka-{}-broker.{}.autoip.dcos.thisdcos.directory:1030".format(
            index, config.SERVICE_NAME)
        for index in range(3)
    ]

    client_id = "kafka-client"
    try:
        app = {
            "id": client_id,
            "mem": 512,
            "container": {
                "type": "MESOS",
                "docker": {
                    "image": "elezar/kafka-client:latest",
                    "forcePullImage": True
                },
            },
            "networks": [{"mode": "host"}],
            "env": {
                "JVM_MaxHeapSize": "512",
                "KAFKA_CLIENT_MODE": "test",
                "KAFKA_BROKER_LIST": ",".join(brokers),
                "KAFKA_OPTS": ""
            }
        }
        sdk_marathon.install_app(app)

        yield {**app, "brokers": [b.split(':')[0] for b in brokers]}
    finally:
        sdk_marathon.destroy_app(client_id)
def main(app_id: str, dcos_username: str, dcos_password: str, input_file_uri: str,
         script_cpus: int, script_mem: int, script_args: str, security: str,
         spark_build_branch: str):
    """
    Install a Marathon app that clones spark-build and runs the batch
    scale test with the supplied arguments.
    """

    def _get_app_defn() -> typing.Dict:
        """
        Construct the marathon app definition.

        Closes over main's arguments instead of re-passing all nine.
        """
        return {
            "id": app_id,
            "cmd": "cd $MESOS_SANDBOX; git clone https://github.com/mesosphere/spark-build.git; cd spark-build/; pwd; git checkout $SPARK_BUILD_BRANCH; python3 -m venv test-env; pip3 install -r scale-tests/requirements.txt; dcos cluster setup https://master.mesos --username=$DCOS_UID --password=$DCOS_PASSWORD --no-check; dcos package install spark --cli --yes; cd scale-tests/; export PYTHONPATH=../spark-testing:../testing; python3 batch_test.py $MESOS_SANDBOX/$SCRIPT_ARGS",
            "container": {
                "type": "DOCKER",
                "docker": {
                    "image": "susanxhuynh/dcos-commons:spark",
                }
            },
            "cpus": script_cpus,
            "mem": script_mem,
            "disk": 1024,
            "env": {
                "DCOS_UID": dcos_username,
                "DCOS_PASSWORD": dcos_password,
                "SCRIPT_ARGS": script_args.strip(),
                "SPARK_BUILD_BRANCH": spark_build_branch,
                "SECURITY": security
            },
            "fetch": [{
                "uri": input_file_uri,
            }]
        }

    (success, err_msg) = sdk_marathon.install_app(_get_app_defn())
    assert success, err_msg
def test_marathon_volume_collission():
    """
    Validate that a service registered in a sub-role of slave_public will
    _not_ unreserve Marathon volumes RESERVED in the `slave_public` role.

    Fix: ``check_content`` previously ignored the success flag returned
    by ``shakedown.run_command_on_agent`` and asserted directly on stdout,
    so a transient SSH failure produced a confusing content mismatch
    instead of a clear command failure (which the retry decorator would
    then retry).
    """
    # Uninstall HW first
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

    # Install the marathon app
    marathon_app_name = "persistent-test"
    persistent_app = {
        "id": marathon_app_name,
        "mem": 128,
        "user": "******",
        "cmd": "echo 'this is a test' > persistent-volume/test && sleep 10000",
        "container": {
            "type": "MESOS",
            "volumes": [{
                "persistent": {
                    "type": "root",
                    "size": 500,
                    "constraints": []
                },
                "mode": "RW",
                "containerPath": "persistent-volume"
            }]
        }
    }
    try:
        sdk_marathon.install_app(persistent_app)

        # Get its persistent Volume
        host = sdk_marathon.get_scheduler_host(marathon_app_name)
        ok, pv_name = shakedown.run_command_on_agent(
            host, "ls /var/lib/mesos/slave/volumes/roles/slave_public")
        assert ok
        pv_name = pv_name.strip()

        @retrying.retry(wait_fixed=1000, stop_max_delay=60 * 1000)
        def check_content():
            ok, pv_content = shakedown.run_command_on_agent(
                host,
                "cat /var/lib/mesos/slave/volumes/roles/slave_public/{}/test".format(pv_name))
            # Fail (and retry) clearly when the remote command itself failed,
            # rather than comparing against whatever stdout came back.
            assert ok
            assert pv_content.strip() == "this is a test"

        check_content()

        # Scale down the Marathon app
        app_config = sdk_marathon.get_config(marathon_app_name)
        app_config['instances'] = 0
        sdk_marathon.update_app(marathon_app_name, app_config)

        # Install Hello World
        sdk_install.install(config.PACKAGE_NAME, config.SERVICE_NAME,
                            config.DEFAULT_TASK_COUNT,
                            additional_options=pre_reserved_options)

        # Make sure the persistent volume is still there
        check_content()

        # Uninstall Hello World
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

        # Make sure the persistent volume is still there
        check_content()

        # Scale back up the marathon app
        app_config = sdk_marathon.get_config(marathon_app_name)
        app_config['instances'] = 1
        sdk_marathon.update_app(marathon_app_name, app_config)

        # Make sure the persistent volume is still there
        check_content()
    finally:
        # Reinstall hello world
        sdk_install.install(config.PACKAGE_NAME, config.SERVICE_NAME,
                            config.DEFAULT_TASK_COUNT,
                            additional_options=pre_reserved_options)
        sdk_marathon.destroy_app(marathon_app_name)
def _install_marathon_app(app_definition):
    """Install *app_definition* via Marathon and report whether it succeeded."""
    ok, _ = sdk_marathon.install_app(app_definition)
    return ok
def test_marathon_volume_collission():
    """
    Validate that a service registered in a sub-role of slave_public will
    _not_ unreserve Marathon volumes RESERVED in the `slave_public` role.

    Fix: ``check_content`` previously ignored the success flag returned
    by ``sdk_cmd.agent_ssh`` and asserted directly on stdout, so a
    transient SSH failure produced a confusing content mismatch instead
    of a clear command failure (which the retry decorator then retries).
    """
    # Uninstall HW first
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

    # Install the marathon app
    marathon_app_name = "persistent-test"
    persistent_app = {
        "id": marathon_app_name,
        "mem": 128,
        "user": "******",
        "cmd": "echo 'this is a test' > persistent-volume/test && sleep 10000",
        "container": {
            "type": "MESOS",
            "volumes": [
                {
                    "persistent": {
                        "type": "root",
                        "size": 500,
                        "constraints": []
                    },
                    "mode": "RW",
                    "containerPath": "persistent-volume"
                }
            ]
        }
    }
    try:
        sdk_marathon.install_app(persistent_app)

        # Get its persistent Volume
        host = sdk_marathon.get_scheduler_host(marathon_app_name)
        ok, pv_name = sdk_cmd.agent_ssh(host, "ls /var/lib/mesos/slave/volumes/roles/slave_public")
        assert ok
        pv_name = pv_name.strip()

        @retrying.retry(wait_fixed=1000, stop_max_delay=60*1000)
        def check_content():
            ok, pv_content = sdk_cmd.agent_ssh(host, "cat /var/lib/mesos/slave/volumes/roles/slave_public/{}/test".format(pv_name))
            # Fail (and retry) clearly when the remote command itself failed,
            # rather than comparing against whatever stdout came back.
            assert ok
            assert pv_content.strip() == "this is a test"

        check_content()

        # Scale down the Marathon app
        app_config = sdk_marathon.get_config(marathon_app_name)
        app_config['instances'] = 0
        sdk_marathon.update_app(marathon_app_name, app_config)

        # Install Hello World
        sdk_install.install(config.PACKAGE_NAME, config.SERVICE_NAME,
                            config.DEFAULT_TASK_COUNT,
                            additional_options=pre_reserved_options)

        # Make sure the persistent volume is still there
        check_content()

        # Uninstall Hello World
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

        # Make sure the persistent volume is still there
        check_content()

        # Scale back up the marathon app
        app_config = sdk_marathon.get_config(marathon_app_name)
        app_config['instances'] = 1
        sdk_marathon.update_app(marathon_app_name, app_config)

        # Make sure the persistent volume is still there
        check_content()
    finally:
        # Reinstall hello world
        sdk_install.install(config.PACKAGE_NAME, config.SERVICE_NAME,
                            config.DEFAULT_TASK_COUNT,
                            additional_options=pre_reserved_options)
        sdk_marathon.destroy_app(marathon_app_name)
def test_marathon_volume_collision():
    # This test validates that a service registered in a sub-role of
    # slave_public will _not_ unreserve Marathon volumes RESERVED
    # in the `slave_public` role.

    # Uninstall HW first
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

    # Install the marathon app
    marathon_app_name = "persistent-test"
    volume_name = "persistent-volume"
    persistent_app = {
        "id": marathon_app_name,
        "mem": 128,
        "user": "******",
        "cmd": "echo 'this is a test' > {}/test && sleep 10000".format(volume_name),
        "container": {
            "type": "MESOS",
            "volumes": [{
                "persistent": {
                    "type": "root",
                    "size": 500,
                    "constraints": []
                },
                "mode": "RW",
                "containerPath": volume_name,
            }],
        },
    }

    def _set_instances(count):
        # Scale the Marathon app to the requested instance count.
        app_config = sdk_marathon.get_config(marathon_app_name)
        app_config["instances"] = count
        sdk_marathon.update_app(app_config)

    try:
        sdk_marathon.install_app(persistent_app)

        # Get its persistent Volume
        host = sdk_marathon.get_scheduler_host(marathon_app_name)
        # Should get e.g.: "/var/lib/mesos/slave/volumes/roles/slave_public/persistent-test#persistent-volume#76e7bb6d-64fa-11e8-abc5-8e679b292d5e"
        rc, pv_path, _ = sdk_cmd.agent_ssh(
            host,
            "ls -d /var/lib/mesos/slave/volumes/roles/slave_public/{}#{}#*".format(
                marathon_app_name, volume_name),
        )
        if rc != 0:
            log.error("Could not get slave_public roles. return-code: '%s'\n", rc)
        assert rc == 0
        pv_path = pv_path.strip()

        @retrying.retry(wait_fixed=1000, stop_max_delay=60 * 1000)
        def check_content():
            rc, pv_content, _ = sdk_cmd.agent_ssh(host, "cat {}/test".format(pv_path))
            assert rc == 0 and pv_content.strip() == "this is a test"

        check_content()

        # Scale down the Marathon app
        _set_instances(0)

        # Install Hello World
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            PRERESERVED_TASK_COUNT,
            additional_options=pre_reserved_options,
        )

        # Make sure the persistent volume is still there
        check_content()

        # Uninstall Hello World
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

        # Make sure the persistent volume is still there
        check_content()

        # Scale back up the marathon app
        _set_instances(1)

        # Make sure the persistent volume is still there
        check_content()
    finally:
        # Reinstall hello world
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            PRERESERVED_TASK_COUNT,
            additional_options=pre_reserved_options,
        )
        sdk_marathon.destroy_app(marathon_app_name)
def test_marathon_volume_collision():
    # This test validates that a service registered in a sub-role of
    # slave_public will _not_ unreserve Marathon volumes RESERVED
    # in the `slave_public` role.

    # Uninstall HW first
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

    # Install the marathon app
    marathon_app_name = "persistent-test"
    volume_name = "persistent-volume"
    persistent_app = {
        "id": marathon_app_name,
        "mem": 128,
        "user": "******",
        "cmd": "echo 'this is a test' > {}/test && sleep 10000".format(volume_name),
        "container": {
            "type": "MESOS",
            "volumes": [
                {
                    "persistent": {"type": "root", "size": 500, "constraints": []},
                    "mode": "RW",
                    "containerPath": volume_name,
                }
            ],
        },
    }

    def _set_instances(count):
        # Scale the Marathon app to the requested instance count.
        app_config = sdk_marathon.get_config(marathon_app_name)
        app_config["instances"] = count
        sdk_marathon.update_app(app_config)

    try:
        sdk_marathon.install_app(persistent_app)

        # Get its persistent Volume
        host = sdk_marathon.get_scheduler_host(marathon_app_name)
        # Should get e.g.: "/var/lib/mesos/slave/volumes/roles/slave_public/persistent-test#persistent-volume#76e7bb6d-64fa-11e8-abc5-8e679b292d5e"
        rc, pv_path, _ = sdk_cmd.agent_ssh(
            host,
            "ls -d /var/lib/mesos/slave/volumes/roles/slave_public/{}#{}#*".format(
                marathon_app_name, volume_name
            ),
        )
        if rc != 0:
            log.error("Could not get slave_public roles. return-code: '%s'\n", rc)
        assert rc == 0
        pv_path = pv_path.strip()

        @retrying.retry(wait_fixed=1000, stop_max_delay=60 * 1000)
        def check_content():
            rc, pv_content, _ = sdk_cmd.agent_ssh(host, "cat {}/test".format(pv_path))
            assert rc == 0 and pv_content.strip() == "this is a test"

        check_content()

        # Scale down the Marathon app
        _set_instances(0)

        # Install Hello World
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_TASK_COUNT,
            additional_options=pre_reserved_options,
        )

        # Make sure the persistent volume is still there
        check_content()

        # Uninstall Hello World
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

        # Make sure the persistent volume is still there
        check_content()

        # Scale back up the marathon app
        _set_instances(1)

        # Make sure the persistent volume is still there
        check_content()
    finally:
        # Reinstall hello world
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_TASK_COUNT,
            additional_options=pre_reserved_options,
        )
        sdk_marathon.destroy_app(marathon_app_name)