def elastic_service_tls(service_account):
    """Install elastic with transport encryption and x-pack enabled.

    Yields once deployed; always uninstalls the service on teardown.
    """
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        sdk_install.install(
            config.PACKAGE_NAME,
            service_name=config.SERVICE_NAME,
            expected_running_tasks=config.DEFAULT_TASK_COUNT,
            additional_options={
                "service": {
                    "service_account": service_account["name"],
                    "service_account_secret": service_account["secret"],
                    "security": {
                        "transport_encryption": {
                            "enabled": True
                        }
                    }
                },
                "elasticsearch": {
                    "xpack_enabled": True,
                }
            })

        yield
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def test_pods_restart_graceful_shutdown():
    """Restart a pod and verify the task was shut down via SIGTERM handler."""
    options = {
        "world": {
            "kill_grace_period": 30
        }
    }

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    sdk_install.install(config.PACKAGE_NAME, config.SERVICE_NAME,
                        config.DEFAULT_TASK_COUNT,
                        additional_options=options)

    world_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'world-0')

    jsonobj = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME,
                              'pod restart world-0', json=True)
    assert len(jsonobj) == 2
    assert jsonobj['pod'] == 'world-0'
    assert len(jsonobj['tasks']) == 1
    assert jsonobj['tasks'][0] == 'world-0-server'

    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'world-0', world_ids)
    config.check_running()

    # ensure the SIGTERM was sent via the "all clean" message in the world
    # service's signal trap/handler, BUT not the shell command, indicated
    # by "echo".
    stdout = sdk_cmd.run_cli(
        "task log --completed --lines=1000 {}".format(world_ids[0]))
    matching_lines = [
        line for line in stdout.split('\n')
        if line.find('echo') < 0 and line.find('all clean') >= 0
    ]
    # keep the last matching line, mirroring the original scan order
    clean_msg = matching_lines[-1] if matching_lines else None
    assert clean_msg is not None
def configure_package(configure_security):
    """Install test jobs and the foldered service; tear everything down after."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs(node_address=config.get_foldered_node_address())
        # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
        for job in test_jobs:
            sdk_jobs.install_job(job)

        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())

        # user=root because Azure CLI needs to run in root...
        # We don't run the Azure tests in strict however, so don't set it then.
        if os.environ.get("SECURITY") == "strict":
            additional_options = {"service": {"name": config.get_foldered_service_name()}}
        else:
            additional_options = {"service": {"name": config.get_foldered_service_name(), "user": "******"}}

        sdk_install.install(
            config.PACKAGE_NAME,
            config.get_foldered_service_name(),
            config.DEFAULT_TASK_COUNT,
            additional_options=additional_options)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())

        # remove job definitions from metronome
        for job in test_jobs:
            sdk_jobs.remove_job(job)
def test_secrets_dcos_space():
    """Verify DCOS_SPACE authorization: tasks outside the secret's space fail."""
    # 1) create secrets in hello-world/somePath, i.e. hello-world/somePath/secret1 ...
    # 2) Tasks with DCOS_SPACE hello-world/somePath
    #    or some DCOS_SPACE path under hello-world/somePath
    #    (for example hello-world/somePath/anotherPath/)
    #    can access these Secrets
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

    # cannot access these secrets because of DCOS_SPACE authorization
    create_secrets("{}/somePath/".format(config.SERVICE_NAME))

    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            NUM_HELLO + NUM_WORLD,
            additional_options=options_dcos_space_test,
            # Wait for 5 minutes. We don't need to wait 15 minutes for hello-world to fail an install
            timeout_seconds=5 * 60)

        assert False, "Should have failed to install"
    except AssertionError as arg:
        raise arg
    except Exception:
        # BUGFIX: was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt. The install is still expected to fail here.
        pass  # expected to fail

    # clean up and delete secrets
    delete_secrets("{}/somePath/".format(config.SERVICE_NAME))
def configure_package(configure_security):
    """Guarantee a clean slate before the session; uninstall afterwards."""
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def test_zones_not_referenced_in_placement_constraints():
    """Without a @zone placement constraint, brokers must report no rack."""
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
    sdk_install.install(
        config.PACKAGE_NAME,
        foldered_name,
        config.DEFAULT_BROKER_COUNT,
        additional_options={
            "service": {
                "name": foldered_name
            }
        })

    test_utils.broker_count_check(
        config.DEFAULT_BROKER_COUNT, service_name=foldered_name)

    broker_ids = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, foldered_name, 'broker list', json=True)

    for broker_id in broker_ids:
        broker_info = sdk_cmd.svc_cli(
            config.PACKAGE_NAME, foldered_name,
            'broker get {}'.format(broker_id), json=True)

        # BUGFIX: compare against None with "is", not "==" (PEP 8).
        assert broker_info.get('rack') is None

    sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
def cassandra_service(service_account: Dict[str, Any]) -> Iterator[Dict[str, Any]]:
    """
    A pytest fixture that installs the cassandra service.

    On teardown, the service is uninstalled.
    """
    options = {
        "service": {
            "name": config.SERVICE_NAME,
            # Note that since we wish to toggle TLS which *REQUIRES* a service account,
            # we need to install Cassandra with a service account to start with.
            "service_account": service_account["name"],
            "service_account_secret": service_account["secret"],
        }
    }

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_TASK_COUNT,
            additional_options=options,
            wait_for_deployment=True,
        )

        yield {**options, **{"package_name": config.PACKAGE_NAME}}
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def hdfs_service_tls(service_account):
    """Install HDFS with transport encryption; yield the service account.

    The service is always uninstalled on teardown.
    """
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        sdk_install.install(
            config.PACKAGE_NAME,
            service_name=config.SERVICE_NAME,
            expected_running_tasks=config.DEFAULT_TASK_COUNT,
            additional_options={
                "service": {
                    "service_account": service_account["name"],
                    "service_account_secret": service_account["secret"],
                    "security": {
                        "transport_encryption": {
                            "enabled": True
                        }
                    }
                }
            },
            timeout_seconds=30 * 60)

        sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)

        yield service_account
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def dynamic_port_config():
    """Install kafka with dynamic broker ports; uninstall after the test."""
    install.install(
        PACKAGE_NAME,
        DEFAULT_BROKER_COUNT,
        service_name=SERVICE_NAME,
        additional_options=DYNAMIC_PORT_OPTIONS_DICT)
    yield
    install.uninstall(SERVICE_NAME, PACKAGE_NAME)
def kafka_server(kerberos):
    """
    A pytest fixture that installs a Kerberized kafka service.

    On teardown, the service is uninstalled.
    """
    service_kerberos_options = {
        "service": {
            "name": config.SERVICE_NAME,
            "security": {
                "kerberos": {
                    "enabled": True,
                    "kdc": {
                        "hostname": kerberos.get_host(),
                        "port": int(kerberos.get_port())
                    },
                    "realm": kerberos.get_realm(),
                    "keytab_secret": kerberos.get_keytab_path(),
                }
            }
        }
    }

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options=service_kerberos_options,
            timeout_seconds=30 * 60)

        yield {**service_kerberos_options, **{"package_name": config.PACKAGE_NAME}}
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def test_marathon_rack_not_found():
    """Install with an unsatisfiable rack constraint; deployment must stall."""
    def fun():
        try:
            return service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
        except Exception:
            # BUGFIX: narrowed from a bare "except:" (which also caught
            # KeyboardInterrupt/SystemExit); the caller retries on False.
            return False

    shakedown.install_package(
        PACKAGE_NAME,
        service_name=SERVICE_NAME,
        options_json=install.get_package_options(
            additional_options={'service': {'placement_constraint': 'rack_id:LIKE:rack-foo-.*'}}
        ),
        wait_for_completion=False)
    try:
        tasks.check_running(PACKAGE_NAME, 1, timeout_seconds=120)
        assert False, "Should have failed to install"
    except AssertionError as arg:
        raise arg
    except Exception:
        # BUGFIX: narrowed from a bare "except:". The timeout is expected.
        pass  # expected to fail

    pl = spin.time_wait_return(fun)

    # check that first node is still (unsuccessfully) looking for a match:
    assert pl['status'] == 'IN_PROGRESS'
    assert pl['phases'][0]['status'] == 'IN_PROGRESS'
    # if so early, it can be PREPARED ?
    assert pl['phases'][0]['steps'][0]['status'] in ('PREPARED', 'PENDING')
    assert pl['phases'][0]['steps'][1]['status'] == 'PENDING'
    assert pl['phases'][0]['steps'][2]['status'] == 'PENDING'

    install.uninstall(SERVICE_NAME, PACKAGE_NAME)
def test_secrets_basic():
    """End-to-end check that secrets survive install and pod replace."""
    # 1) create Secrets
    # 2) install examples/secrets.yml
    # 3) if secret file is not created, tasks will fail
    # 4) wait till deployment finishes
    # 5) do replace operation
    # 6) ensure all tasks are running
    # 7) delete Secrets
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

    create_secrets("{}/".format(config.SERVICE_NAME))

    sdk_install.install(config.PACKAGE_NAME, config.SERVICE_NAME,
                        NUM_HELLO + NUM_WORLD,
                        additional_options=secret_options)

    hello_tasks_0 = sdk_tasks.get_task_ids(config.SERVICE_NAME, "hello-0-server")
    # BUGFIX: was "word-0-server" (typo), which matched no tasks and made the
    # world check_tasks_updated() call below pass vacuously.
    world_tasks_0 = sdk_tasks.get_task_ids(config.SERVICE_NAME, "world-0-server")

    # ensure that secrets work after replace
    sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod replace hello-0')
    sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod replace world-0')

    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, "hello-0-server", hello_tasks_0)
    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'world-0-server', world_tasks_0)

    # tasks will fail if secret files are not created by mesos module
    sdk_tasks.check_running(config.SERVICE_NAME, NUM_HELLO + NUM_WORLD)

    # clean up and delete secrets
    delete_secrets("{}/".format(config.SERVICE_NAME))
def kafka_server(zookeeper_server):
    """Install kafka wired to the given zookeeper ensemble; uninstall on teardown."""
    try:
        # Get the zookeeper DNS values
        zookeeper_dns = sdk_cmd.svc_cli(
            zookeeper_server["package_name"],
            zookeeper_server["service"]["name"],
            "endpoint clientport",
            json=True)["dns"]

        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        config.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options={
                "kafka": {
                    "kafka_zookeeper_uri": ",".join(zookeeper_dns)
                }
            })

        # wait for brokers to finish registering before starting tests
        test_utils.broker_count_check(
            config.DEFAULT_BROKER_COUNT, service_name=config.SERVICE_NAME)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def hdfs_server(kerberos):
    """
    A pytest fixture that installs a Kerberized HDFS service.

    On teardown, the service is uninstalled.
    """
    service_options = {
        "service": {
            "name": config.SERVICE_NAME,
            "security": {
                "kerberos": {
                    "enabled": True,
                    "kdc": {"hostname": kerberos.get_host(), "port": int(kerberos.get_port())},
                    "realm": kerberos.get_realm(),
                    "keytab_secret": kerberos.get_keytab_path(),
                }
            },
        },
        "hdfs": {"security_auth_to_local": auth.get_principal_to_user_mapping()},
    }

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_TASK_COUNT,
            additional_options=service_options,
            timeout_seconds=30 * 60,
        )

        yield {**service_options, **{"package_name": config.PACKAGE_NAME}}
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def test_install_without_additional_principal_to_user_mapping(kerberos, service_account):
    """Kerberized install should succeed with only the default principal mapping."""
    try:
        service_options = {
            "service": {
                "name": config.SERVICE_NAME,
                "service_account": service_account["name"],
                "service_account_secret": service_account["secret"],
                "security": {
                    "kerberos": {
                        "enabled": True,
                        "debug": True,
                        "kdc": {"hostname": kerberos.get_host(), "port": int(kerberos.get_port())},
                        "realm": kerberos.get_realm(),
                        "keytab_secret": kerberos.get_keytab_path(),
                    }
                },
            }
        }

        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_TASK_COUNT,
            additional_options=service_options,
            timeout_seconds=30 * 60,
        )
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def test_zones_referenced_in_placement_constraints():
    """With a @zone GROUP_BY constraint, every broker must report a valid zone rack."""
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
    sdk_install.install(
        config.PACKAGE_NAME,
        foldered_name,
        config.DEFAULT_BROKER_COUNT,
        additional_options={
            "service": {
                "name": foldered_name,
                "placement_constraint": "[[\"@zone\", \"GROUP_BY\"]]"
            }
        })

    test_utils.broker_count_check(
        config.DEFAULT_BROKER_COUNT, service_name=foldered_name)

    broker_ids = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, foldered_name, 'broker list', json=True)

    for broker_id in broker_ids:
        broker_info = sdk_cmd.svc_cli(
            config.PACKAGE_NAME, foldered_name,
            'broker get {}'.format(broker_id), json=True)

        assert sdk_fault_domain.is_valid_zone(broker_info.get('rack'))

    sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
def configure_package(configure_security: None) -> Iterator[None]:
    """Remove any leftover foldered service before tests; uninstall afterwards."""
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())
def kafka_server(service_account):
    """
    A pytest fixture that installs a non-kerberized kafka service.

    On teardown, the service is uninstalled.
    """
    service_options = {
        "service": {
            "name": config.SERVICE_NAME,
            # Note that since we wish to toggle TLS which *REQUIRES* a service account,
            # we need to install Kafka with a service account to start with.
            "service_account": service_account["name"],
            "service_account_secret": service_account["secret"],
        }
    }

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options=service_options,
            timeout_seconds=30 * 60)

        yield {**service_options, **{"package_name": config.PACKAGE_NAME}}
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def kafka_service_tls(service_account):
    """Install kafka with transport encryption; yield the service account."""
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        config.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options={
                "service": {
                    "service_account": service_account["name"],
                    "service_account_secret": service_account["secret"],
                    "security": {
                        "transport_encryption": {
                            "enabled": True
                        }
                    }
                }
            }
        )

        sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)

        yield service_account
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def test_hostname_unique():
    """With hostname UNIQUE placement, one hello and one world land on each agent."""
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    options = _escape_placement_for_1_9(
        {
            "service": {"yaml": "marathon_constraint"},
            "hello": {"count": get_num_private_agents(), "placement": '[["hostname", "UNIQUE"]]'},
            "world": {"count": get_num_private_agents(), "placement": '[["hostname", "UNIQUE"]]'},
        }
    )

    sdk_install.install(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        get_num_private_agents() * 2,
        additional_options=options,
    )

    # hello deploys first. One "world" task should end up placed with each "hello" task.
    # ensure "hello" task can still be placed with "world" task
    old_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, "hello-0")
    sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "pod replace hello-0")
    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, "hello-0", old_ids)
    sdk_plan.wait_for_completed_recovery(config.SERVICE_NAME)
    sdk_tasks.check_running(
        config.SERVICE_NAME, get_num_private_agents() * 2 - 1, timeout_seconds=10
    )
    sdk_tasks.check_running(config.SERVICE_NAME, get_num_private_agents() * 2)
    ensure_count_per_agent(hello_count=1, world_count=1)
def setup_constraint_switch():
    """Pin hello-0 to one agent, then retarget its constraint to another agent.

    Returns (original_agent, new_agent, original_task_ids).
    """
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

    agents = sdk_agents.get_private_agents()
    some_agent = agents[0]["hostname"]
    other_agent = agents[1]["hostname"]
    log.info("Agents: %s %s", some_agent, other_agent)
    assert some_agent != other_agent

    options = _escape_placement_for_1_9(
        {
            "service": {"yaml": "marathon_constraint"},
            "hello": {
                "count": 1,
                # First, we stick the pod to some_agent
                "placement": '[["hostname", "LIKE", "{}"]]'.format(some_agent),
            },
            "world": {"count": 0},
        }
    )
    sdk_install.install(config.PACKAGE_NAME, config.SERVICE_NAME, 1, additional_options=options)
    sdk_tasks.check_running(config.SERVICE_NAME, 1)
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, "hello")

    # Now, stick it to other_agent
    marathon_config = sdk_marathon.get_config(config.SERVICE_NAME)
    marathon_config["env"]["HELLO_PLACEMENT"] = '[["hostname", "LIKE", "{}"]]'.format(other_agent)
    sdk_marathon.update_app(marathon_config)
    # Wait for the scheduler to be up and settled before advancing.
    sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)

    return some_agent, other_agent, hello_ids
def test_zones_referenced_in_placement_constraints():
    """With @zone GROUP_BY on all node types, each node must report a valid zone."""
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    sdk_install.install(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        config.DEFAULT_TASK_COUNT,
        additional_options={
            "master_nodes": {
                "placement": "[[\"@zone\", \"GROUP_BY\"]]"
            },
            "data_nodes": {
                "placement": "[[\"@zone\", \"GROUP_BY\"]]"
            },
            "ingest_nodes": {
                "placement": "[[\"@zone\", \"GROUP_BY\"]]"
            },
            "coordinator_nodes": {
                "placement": "[[\"@zone\", \"GROUP_BY\"]]"
            }
        })

    nodes_info = config.get_elasticsearch_nodes_info(
        service_name=config.SERVICE_NAME)

    for node_uid, node in nodes_info["nodes"].items():
        assert "zone" == sdk_utils.get_in([
            "settings", "cluster", "routing", "allocation", "awareness",
            "attributes"
        ], node)
        assert sdk_fault_domain.is_valid_zone(
            sdk_utils.get_in(["attributes", "zone"], node))

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def configure_package(configure_security: None) -> Iterator[None]:
    """Install auth-enabled service plus test jobs and secret; full teardown after."""
    test_jobs: List[Dict[str, Any]] = []
    try:
        test_jobs = config.get_all_jobs(auth=True)
        # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
        for job in test_jobs:
            sdk_jobs.install_job(job)

        create_secret(
            secret_value=config.SECRET_VALUE,
            secret_path=config.PACKAGE_NAME + '/' + config.SECRET_VALUE
        )
        service_options = {
            "service": {
                "name": config.SERVICE_NAME,
                "security": {"authentication": {"enabled": True}, "authorization": {"enabled": True}},
            }
        }

        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_TASK_COUNT,
            additional_options=service_options,
        )

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

        delete_secret(secret=config.PACKAGE_NAME + '/' + config.SECRET_VALUE)

        # remove job definitions from metronome
        for job in test_jobs:
            sdk_jobs.remove_job(job)
def configure_package(configure_security):
    """Install or upgrade the foldered service depending on DC/OS version.

    NOTE(review): ``foldered_name`` is not defined in this function — it is
    presumably a module-level value; confirm against the rest of the file.
    """
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
        if sdk_utils.dcos_version_less_than("1.9"):
            # HDFS upgrade in 1.8 is not supported.
            sdk_install.install(
                config.PACKAGE_NAME,
                foldered_name,
                config.DEFAULT_TASK_COUNT,
                additional_options={"service": {"name": foldered_name}},
                timeout_seconds=30 * 60,
            )
        else:
            sdk_upgrade.test_upgrade(
                config.PACKAGE_NAME,
                foldered_name,
                config.DEFAULT_TASK_COUNT,
                from_options={"service": {"name": foldered_name}},
                timeout_seconds=30 * 60,
            )

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
def cassandra_service_tls(service_account):
    """Install Cassandra with transport encryption; uninstall after the tests."""
    sdk_install.uninstall(package_name=config.PACKAGE_NAME, service_name=config.SERVICE_NAME)
    sdk_install.install(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        config.DEFAULT_TASK_COUNT,
        additional_options={
            "service": {
                "service_account": service_account["name"],
                "service_account_secret": service_account["secret"],
                "security": {
                    "transport_encryption": {
                        "enabled": True
                    }
                }
            }
        }
    )

    sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)

    # Wait for service health check to pass
    shakedown.service_healthy(config.SERVICE_NAME)

    yield

    sdk_install.uninstall(package_name=config.PACKAGE_NAME, service_name=config.SERVICE_NAME)
def zookeeper_server(kerberos):
    """Install a Kerberized zookeeper ensemble; uninstall on teardown."""
    service_kerberos_options = {
        "service": {
            "name": config.ZOOKEEPER_SERVICE_NAME,
            "security": {
                "kerberos": {
                    "enabled": True,
                    "kdc": {
                        "hostname": kerberos.get_host(),
                        "port": int(kerberos.get_port())
                    },
                    "realm": kerberos.get_realm(),
                    "keytab_secret": kerberos.get_keytab_path(),
                }
            }
        }
    }

    try:
        sdk_install.uninstall(config.ZOOKEEPER_PACKAGE_NAME, config.ZOOKEEPER_SERVICE_NAME)
        sdk_install.install(
            config.ZOOKEEPER_PACKAGE_NAME,
            config.ZOOKEEPER_SERVICE_NAME,
            config.ZOOKEEPER_TASK_COUNT,
            additional_options=service_kerberos_options,
            timeout_seconds=30 * 60)

        yield {**service_kerberos_options, **{"package_name": config.ZOOKEEPER_PACKAGE_NAME}}
    finally:
        sdk_install.uninstall(config.ZOOKEEPER_PACKAGE_NAME, config.ZOOKEEPER_SERVICE_NAME)
def configure_package(configure_security):
    """Install the service on a virtual network plus its test jobs; clean up after."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs()
        # destroy any leftover jobs first, so that they don't touch the newly installed service:
        for job in test_jobs:
            sdk_jobs.remove_job(job)

        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_TASK_COUNT,
            additional_options=sdk_networks.ENABLE_VIRTUAL_NETWORKS_OPTIONS)

        tmp_dir = tempfile.mkdtemp(prefix='cassandra-test')
        for job in test_jobs:
            sdk_jobs.install_job(job, tmp_dir=tmp_dir)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

        for job in test_jobs:
            sdk_jobs.remove_job(job)
def setup_constraint_switch():
    """Pin hello to one agent, then flip its placement constraint to a second agent.

    Returns (original_agent, new_agent, original_task_ids).
    """
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

    agents = shakedown.get_private_agents()
    some_agent = agents[0]
    other_agent = agents[1]
    log.info('Agents: %s %s', some_agent, other_agent)
    assert some_agent != other_agent

    options = _escape_placement_for_1_9({
        "service": {
            "yaml": "marathon_constraint"
        },
        "hello": {
            "count": 1,
            # First, we stick the pod to some_agent
            "placement": "[[\"hostname\", \"LIKE\", \"{}\"]]".format(some_agent)
        },
        "world": {
            "count": 0
        }
    })
    sdk_install.install(config.PACKAGE_NAME, config.SERVICE_NAME, 1, additional_options=options)
    sdk_tasks.check_running(config.SERVICE_NAME, 1)
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello')

    # Now, stick it to other_agent
    marathon_config = sdk_marathon.get_config(config.SERVICE_NAME)
    marathon_config['env']['HELLO_PLACEMENT'] = "[[\"hostname\", \"LIKE\", \"{}\"]]".format(other_agent)
    sdk_marathon.update_app(config.SERVICE_NAME, marathon_config)
    # Wait for the scheduler to be up and settled before advancing.
    sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)

    return some_agent, other_agent, hello_ids
def test_dynamic_port_comes_online():
    """Brokers configured with dynamic ports should all come up."""
    sdk_install.install(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        config.DEFAULT_BROKER_COUNT,
        additional_options=DYNAMIC_PORT_OPTIONS_DICT)
    sdk_tasks.check_running(config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def configure_package(configure_security: None) -> Iterator[None]:
    """Install the service with defaults before tests; uninstall afterwards."""
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        sdk_install.install(config.PACKAGE_NAME, config.SERVICE_NAME, config.DEFAULT_TASK_COUNT)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def configure_package(configure_security):
    """Install the service on a virtual network along with test jobs; clean up after."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs()
        # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
        for job in test_jobs:
            sdk_jobs.install_job(job)

        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_TASK_COUNT,
            additional_options=sdk_networks.ENABLE_VIRTUAL_NETWORKS_OPTIONS)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

        for job in test_jobs:
            sdk_jobs.remove_job(job)
def setup_module(module):
    """Install the foldered service as root and stage the test jobs."""
    install.uninstall(FOLDERED_SERVICE_NAME, package_name=PACKAGE_NAME)
    utils.gc_frameworks()

    # 1. check_suppression=False due to https://jira.mesosphere.com/browse/CASSANDRA-568
    # 2. user: root because Azure CLI needs to run in root...
    install.install(
        PACKAGE_NAME,
        DEFAULT_TASK_COUNT,
        service_name=FOLDERED_SERVICE_NAME,
        additional_options={
            "service": {
                "name": FOLDERED_SERVICE_NAME,
                "user": "******"
            }
        },
        check_suppression=False)
    plan.wait_for_completed_deployment(FOLDERED_SERVICE_NAME)

    tmp_dir = tempfile.mkdtemp(prefix='cassandra-test')
    for job in TEST_JOBS:
        jobs.install_job(job, tmp_dir=tmp_dir)
def test_detect_zones_enabled():
    """With detect_zones on, every elasticsearch node should carry a valid zone attribute."""
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    sdk_install.install(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        config.DEFAULT_TASK_COUNT,
        additional_options={"service": {
            "detect_zones": True
        }})

    nodes_info = config.get_elasticsearch_nodes_info(
        service_name=config.SERVICE_NAME)

    for node_uid, node in nodes_info["nodes"].items():
        assert "zone" == get_in([
            "settings", "cluster", "routing", "allocation", "awareness",
            "attributes"
        ], node)
        assert sdk_fault_domain.is_valid_zone(
            get_in(["attributes", "zone"], node))

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def test_unique_vips():
    """Two dispatchers under different groups should each get a reachable VIP."""
    @retrying.retry(wait_exponential_multiplier=1000, stop_max_attempt_number=7)  # ~2 minutes
    def verify_ip_is_reachable(ip):
        ok, _ = sdk_cmd.master_ssh("curl -v {}".format(ip))
        assert ok

    spark1_service_name = "test/groupa/spark"
    spark2_service_name = "test/groupb/spark"
    try:
        utils.require_spark(spark1_service_name)
        utils.require_spark(spark2_service_name)

        dispatcher1_ui_ip = sdk_hosts.vip_host("marathon", "dispatcher.{}".format(spark1_service_name), 4040)
        dispatcher2_ui_ip = sdk_hosts.vip_host("marathon", "dispatcher.{}".format(spark2_service_name), 4040)

        verify_ip_is_reachable(dispatcher1_ui_ip)
        verify_ip_is_reachable(dispatcher2_ui_ip)
    finally:
        sdk_install.uninstall(utils.SPARK_PACKAGE_NAME, spark1_service_name)
        sdk_install.uninstall(utils.SPARK_PACKAGE_NAME, spark2_service_name)
def test_max_per_hostname():
    """MAX_PER constraints should cap hello at 2 and world at 3 per agent."""
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    options = {
        "service": {
            "spec_file": "examples/marathon_constraint.yml"
        },
        "hello": {
            "count": config.get_num_private_agents() * 2,
            "placement": "hostname:MAX_PER:2"
        },
        "world": {
            "count": config.get_num_private_agents() * 3,
            "placement": "hostname:MAX_PER:3"
        }
    }

    sdk_install.install(config.PACKAGE_NAME, config.SERVICE_NAME,
                        config.get_num_private_agents() * 5,
                        additional_options=options)
    ensure_max_count_per_agent(hello_count=2, world_count=3)
def cassandra_service_tls(service_account):
    """Install Cassandra with TLS-only transport; uninstall after the tests.

    NOTE(review): the service is installed under service_name=service_account,
    but the deployment/health waits below reference PACKAGE_NAME — confirm this
    matches the sdk_plan/shakedown API expectations in this codebase.
    """
    sdk_install.install(
        PACKAGE_NAME,
        DEFAULT_TASK_COUNT,
        service_name=service_account,
        additional_options={
            "service": {
                "service_account_secret": service_account,
                "service_account": service_account,
                "tls": True,
                "tls_allow_plaintext": False,
            }
        })

    sdk_plan.wait_for_completed_deployment(PACKAGE_NAME)

    # Wait for service health check to pass
    shakedown.service_healthy(PACKAGE_NAME)

    yield

    sdk_install.uninstall(PACKAGE_NAME)
def test_unique_vips():
    """Each of two grouped spark dispatchers should expose its own reachable UI VIP."""
    spark1_service_name = "test/groupa/spark"
    spark2_service_name = "test/groupb/spark"
    try:
        utils.require_spark(spark1_service_name)
        utils.require_spark(spark2_service_name)

        dispatcher1_ui = sdk_hosts.vip_host(
            "marathon", "dispatcher.{}".format(spark1_service_name), 4040)
        dispatcher2_ui = sdk_hosts.vip_host(
            "marathon", "dispatcher.{}".format(spark2_service_name), 4040)

        # verify dispatcher-ui is reachable at VIP
        ok, _ = sdk_cmd.master_ssh("curl {}".format(dispatcher1_ui))
        assert ok
        ok, _ = sdk_cmd.master_ssh("curl {}".format(dispatcher2_ui))
        assert ok
    finally:
        sdk_install.uninstall(utils.SPARK_PACKAGE_NAME, spark1_service_name)
        sdk_install.uninstall(utils.SPARK_PACKAGE_NAME, spark2_service_name)
def test_max_per_hostname():
    """JSON MAX_PER constraints should cap hello at 2 and world at 3 per agent."""
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    options = _escape_placement_for_1_9({
        "service": {
            "yaml": "marathon_constraint"
        },
        "hello": {
            "count": config.get_num_private_agents() * 2,
            "placement": "[[\"hostname\", \"MAX_PER\", \"2\"]]"
        },
        "world": {
            "count": config.get_num_private_agents() * 3,
            "placement": "[[\"hostname\", \"MAX_PER\", \"3\"]]"
        }
    })

    sdk_install.install(config.PACKAGE_NAME, config.SERVICE_NAME,
                        config.get_num_private_agents() * 5,
                        additional_options=options)
    ensure_max_count_per_agent(hello_count=2, world_count=3)
def test_cluster():
    """A CLUSTER constraint should place every hello task on the chosen agent."""
    install.uninstall(PACKAGE_NAME)
    some_agent = shakedown.get_private_agents().pop()
    options = {
        "service": {
            "spec_file": "examples/marathon_constraint.yml"
        },
        "hello": {
            "count": num_private_agents,
            "placement": "hostname:CLUSTER:{}".format(some_agent)
        },
        "world": {
            "count": 0
        }
    }

    install.install(PACKAGE_NAME, num_private_agents, additional_options=options)
    plan.wait_for_completed_deployment(PACKAGE_NAME)
    ensure_count_per_agent(hello_count=num_private_agents, world_count=0)
def test_rr_by_hostname():
    """GROUP_BY hostname should round-robin tasks evenly: 2 of each per agent."""
    install.uninstall(PACKAGE_NAME)
    options = {
        "service": {
            "spec_file": "examples/marathon_constraint.yml"
        },
        "hello": {
            "count": num_private_agents * 2,
            "placement": "hostname:GROUP_BY:{}".format(num_private_agents)
        },
        "world": {
            "count": num_private_agents * 2,
            "placement": "hostname:GROUP_BY:{}".format(num_private_agents)
        }
    }

    install.install(PACKAGE_NAME, num_private_agents * 4, additional_options=options)
    plan.wait_for_completed_deployment(PACKAGE_NAME)
    ensure_count_per_agent(hello_count=2, world_count=2)
def test_max_per_hostname():
    """MAX_PER constraints should cap hello at 2 and world at 3 per agent."""
    install.uninstall(PACKAGE_NAME)
    options = {
        "service": {
            "spec_file": "examples/marathon_constraint.yml"
        },
        "hello": {
            "count": num_private_agents * 2,
            "placement": "hostname:MAX_PER:2"
        },
        "world": {
            "count": num_private_agents * 3,
            "placement": "hostname:MAX_PER:3"
        }
    }

    install.install(PACKAGE_NAME, num_private_agents * 5, additional_options=options)
    plan.wait_for_completed_deployment(PACKAGE_NAME)
    ensure_count_per_agent(hello_count=2, world_count=3)
def configure_package(configure_security):
    """Install test jobs and the foldered service; remove both on teardown."""
    test_jobs = []
    try:
        test_jobs = config.get_all_jobs(
            node_address=config.get_foldered_node_address())
        # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
        for job in test_jobs:
            sdk_jobs.install_job(job)

        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())

        # user=root because Azure CLI needs to run in root...
        # We don't run the Azure tests in strict however, so don't set it then.
        if os.environ.get("SECURITY") == "strict":
            additional_options = {
                "service": {
                    "name": config.get_foldered_service_name()
                }
            }
        else:
            additional_options = {
                "service": {
                    "name": config.get_foldered_service_name(),
                    "user": "******"
                }
            }

        sdk_install.install(config.PACKAGE_NAME,
                            config.get_foldered_service_name(),
                            config.DEFAULT_TASK_COUNT,
                            additional_options=additional_options)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.get_foldered_service_name())

        # remove job definitions from metronome
        for job in test_jobs:
            sdk_jobs.remove_job(job)
def zookeeper_server(kerberos):
    """Install a pinned-version Kerberized zookeeper ensemble; uninstall on teardown."""
    service_kerberos_options = {
        "service": {
            "name": config.ZOOKEEPER_SERVICE_NAME,
            "security": {
                "kerberos": {
                    "enabled": True,
                    "kdc": {
                        "hostname": kerberos.get_host(),
                        "port": int(kerberos.get_port())
                    },
                    "realm": kerberos.get_realm(),
                    "keytab_secret": kerberos.get_keytab_path(),
                }
            },
        }
    }

    try:
        sdk_install.uninstall(config.ZOOKEEPER_PACKAGE_NAME, config.ZOOKEEPER_SERVICE_NAME)
        sdk_install.install(
            config.ZOOKEEPER_PACKAGE_NAME,
            config.ZOOKEEPER_SERVICE_NAME,
            config.ZOOKEEPER_TASK_COUNT,
            package_version=config.ZOOKEEPER_PACKAGE_VERSION,
            additional_options=service_kerberos_options,
            timeout_seconds=30 * 60,
        )

        yield {
            **service_kerberos_options,
            **{
                "package_name": config.ZOOKEEPER_PACKAGE_NAME
            }
        }
    finally:
        sdk_install.uninstall(config.ZOOKEEPER_PACKAGE_NAME, config.ZOOKEEPER_SERVICE_NAME)
def kafka_server(kerberos, service_account):
    """
    A pytest fixture that installs a Kerberized kafka service.

    On teardown, the service is uninstalled.
    """
    service_kerberos_options = {
        "service": {
            "name": config.SERVICE_NAME,
            "service_account": service_account,
            "service_account_secret": service_account,
            "security": {
                "kerberos": {
                    "enabled": True,
                    "kdc": {
                        "hostname": kerberos.get_host(),
                        "port": int(kerberos.get_port())
                    },
                    "realm": sdk_auth.REALM,
                    "keytab_secret": kerberos.get_keytab_path(),
                },
                "transport_encryption": {
                    "enabled": True
                }
            }
        }
    }

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options=service_kerberos_options,
            timeout_seconds=30 * 60)

        yield {**service_kerberos_options, **{"package_name": config.PACKAGE_NAME}}
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def kafka_server(kerberos, zookeeper_server):
    """Install a Kerberized Kafka wired to the kerberized ZooKeeper fixture.

    Yields the install options plus the package name; uninstalls on teardown.
    """
    # Resolve the ZooKeeper client endpoints Kafka should connect to.
    zookeeper_dns = sdk_cmd.svc_cli(
        zookeeper_server["package_name"],
        zookeeper_server["service"]["name"],
        "endpoint clientport",
        json=True)["dns"]

    kerberos_settings = {
        "enabled": True,
        "enabled_for_zookeeper": True,
        "kdc": {
            "hostname": kerberos.get_host(),
            "port": int(kerberos.get_port())
        },
        "realm": kerberos.get_realm(),
        "keytab_secret": kerberos.get_keytab_path(),
    }
    service_options = {
        "service": {
            "name": config.SERVICE_NAME,
            "security": {
                "kerberos": kerberos_settings
            }
        },
        "kafka": {
            "kafka_zookeeper_uri": ",".join(zookeeper_dns)
        }
    }

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options=service_options,
            timeout_seconds=30 * 60)
        yield dict(service_options, package_name=config.PACKAGE_NAME)
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def kerberos(configure_security):
    """Set up a KDC, install a Kerberized HDFS (foldered name), yield the KDC env.

    On teardown the service is uninstalled and the KDC environment is cleaned
    up.

    Bug fix: `kerberos_env` was only bound inside the `try` body, so a failure
    in `get_principals()` (or the KerberosEnvironment constructor) made the
    `finally` block raise NameError instead of reporting the original error.
    It is now pre-bound to None.
    """
    kerberos_env = None  # pre-bind so the finally block can always test it
    try:
        principals = get_principals()
        kerberos_env = sdk_auth.KerberosEnvironment()
        kerberos_env.add_principals(principals)
        kerberos_env.finalize()
        service_kerberos_options = {
            "service": {
                "name": config.FOLDERED_SERVICE_NAME,
                "security": {
                    "kerberos": {
                        "enabled": True,
                        "kdc": {
                            "hostname": kerberos_env.get_host(),
                            "port": int(kerberos_env.get_port())
                        },
                        "keytab_secret": kerberos_env.get_keytab_path(),
                        "realm": sdk_auth.REALM
                    }
                }
            },
            "hdfs": {
                "security_auth_to_local": get_principal_to_user_mapping()
            }
        }
        # NOTE(review): this pre-install cleanup targets config.SERVICE_NAME
        # while install/teardown use config.FOLDERED_SERVICE_NAME — confirm
        # this asymmetry is intentional (e.g. clearing a leftover non-foldered
        # instance).
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        sdk_install.install(
            config.PACKAGE_NAME,
            config.FOLDERED_SERVICE_NAME,
            config.DEFAULT_TASK_COUNT,
            additional_options=service_kerberos_options,
            timeout_seconds=30 * 60)

        yield kerberos_env
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.FOLDERED_SERVICE_NAME)
        if kerberos_env:
            kerberos_env.cleanup()
def cleanup(args):
    """Uninstall every service listed in a previously generated output file.

    Args:
        args: docopt-style mapping containing the "<output_file>" path to a
            JSON document mapping keys to lists of service descriptors.

    Bug fix: the loop previously read `for k, services in services.items():`,
    rebinding (shadowing) the dict being iterated with each inner list — it
    happened to work only because the iterator was created before rebinding.
    The loop targets are now distinctly named.
    """
    input_filename = args["<output_file>"]
    log.info("Reading service definition from %s", input_filename)
    with open(input_filename) as fp:
        services_by_key = json.load(fp)

    for key, service_list in services_by_key.items():
        log.info("Processing cleanup of %s", key)
        for s in service_list:
            service_name = s["service"]["name"]
            log.info("Uninstalling %s with name %s", s["package_name"], service_name)
            sdk_install.uninstall(s["package_name"], service_name)
            # Only tear down security artifacts when an account was created.
            service_account = sdk_utils.get_in(["service", "service_account"], s)
            if service_account:
                log.info("Removing service accounts and secrets")
                sdk_security.cleanup_security(service_name, s["service_account_info"])
def kafka_server(configure_security):
    """Install a default Kafka service and yield its package/service info.

    Waits for brokers to register and for their DNS entries to resolve before
    yielding; uninstalls on teardown.
    """
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        config.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT)

        # wait for brokers to finish registering before starting tests
        test_utils.broker_count_check(
            config.DEFAULT_BROKER_COUNT, service_name=config.SERVICE_NAME)
        # Since the tests below interact with the brokers, ensure that the DNS resolves
        test_utils.wait_for_broker_dns(config.PACKAGE_NAME, config.SERVICE_NAME)

        yield {
            "package_name": config.PACKAGE_NAME,
            "service": {"name": config.SERVICE_NAME},
        }
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def test_cluster():
    """Pin every 'hello' pod to one agent via a hostname CLUSTER constraint
    and verify the resulting per-agent pod counts."""
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

    # Pick an arbitrary private agent to host all of the 'hello' pods.
    target_agent = shakedown.get_private_agents().pop()
    options = {
        "service": {
            "spec_file": "examples/marathon_constraint.yml"
        },
        "hello": {
            "count": config.get_num_private_agents(),
            "placement": "hostname:CLUSTER:{}".format(target_agent)
        },
        "world": {
            "count": 0
        }
    }

    sdk_install.install(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        config.get_num_private_agents(),
        additional_options=options)
    ensure_count_per_agent(
        hello_count=config.get_num_private_agents(), world_count=0)
def configure_package(configure_security):
    """Install hello-world with the canary spec for the test session.

    Installed with zero expected tasks and without waiting for deployment;
    uninstalled on teardown.
    """
    try:
        sdk_install.uninstall(config.PACKAGE_NAME)
        canary_options = {
            'service': {'spec_file': 'examples/canary.yml'},
            'hello': {'count': 4},
            'world': {'count': 4},
        }
        # due to canary: no tasks should launch, and suppressed shouldn't be set
        sdk_install.install(
            config.PACKAGE_NAME,
            0,
            additional_options=canary_options,
            wait_for_deployment=False)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME)
def configure_package(configure_security):
    """Run the upgrade-test install for the foldered service, with metronome
    job setup before and job/service teardown after the session."""
    test_jobs = []
    foldered_name = config.get_foldered_service_name()
    try:
        test_jobs = config.get_all_jobs(
            node_address=config.get_foldered_node_address())
        # destroy/reinstall any prior leftover jobs, so that they don't touch
        # the newly installed service:
        for job in test_jobs:
            sdk_jobs.install_job(job)

        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
        sdk_upgrade.test_upgrade(
            config.PACKAGE_NAME,
            foldered_name,
            config.DEFAULT_TASK_COUNT,
            additional_options={"service": {"name": foldered_name}},
        )

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
        for job in test_jobs:
            sdk_jobs.remove_job(job)
def configure_package(configure_security):
    """Install hello-world with a profile mount volume for the test session.

    The pod mounts an xfs-profile volume and verifies it via `df`;
    uninstalled on teardown.
    """
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        volume_options = {
            "service": {
                "test_profile_volume_command": "df -t xfs",
                "yaml": "pod-profile-mount-volume"
            },
            "hello": {
                # hardcoded in `tools/create_testing_volumes.py`.
                "volume_profile": "xfs"
            }
        }
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            NUM_HELLO,
            additional_options=volume_options)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def hdfs_server(kerberos):
    """
    A pytest fixture that installs a Kerberized HDFS service.

    On teardown, the service is uninstalled.
    """
    kdc_settings = {
        "hostname": kerberos.get_host(),
        "port": int(kerberos.get_port())
    }
    service_options = {
        "service": {
            "name": config.SERVICE_NAME,
            "security": {
                "kerberos": {
                    "enabled": True,
                    "kdc": kdc_settings,
                    "realm": kerberos.get_realm(),
                    "keytab_secret": kerberos.get_keytab_path(),
                }
            },
        },
        "hdfs": {
            "security_auth_to_local": auth.get_principal_to_user_mapping()
        },
    }

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_TASK_COUNT,
            additional_options=service_options,
            timeout_seconds=30 * 60,
        )
        yield dict(service_options, package_name=config.PACKAGE_NAME)
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def configure_package(configure_zookeeper):
    """Install Kafka against an externally provided ZooKeeper ensemble.

    Waits for all brokers to register before yielding; uninstalls Kafka on
    teardown.

    Bug fix: the ZooKeeper connection string previously listed
    `zookeeper-0-server` three times (copy-paste); it now enumerates the
    three ensemble members 0, 1 and 2.
    """
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        zookeeper_framework_host = "{}.autoip.dcos.thisdcos.directory:1140".format(ZK_SERVICE_NAME)
        config.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options={
                "kafka": {
                    "kafka_zookeeper_uri": "zookeeper-0-server.{host},zookeeper-1-server.{host},zookeeper-2-server.{host}".format(host=zookeeper_framework_host)
                }
            })
        # wait for brokers to finish registering before starting tests
        test_utils.broker_count_check(
            config.DEFAULT_BROKER_COUNT, service_name=config.SERVICE_NAME)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def kafka_server(kerberos, service_account):
    """
    A pytest fixture that installs a Kerberized kafka service with TLS and a
    pinned cipher list.

    On teardown, the service is uninstalled.
    """
    security_settings = {
        "kerberos": {
            "enabled": True,
            "kdc": {"hostname": kerberos.get_host(), "port": int(kerberos.get_port())},
            "realm": kerberos.get_realm(),
            "keytab_secret": kerberos.get_keytab_path(),
        },
        "transport_encryption": {
            "enabled": True,
            "ciphers": "TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_CBC_SHA256,TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384",
        },
    }
    service_kerberos_options = {
        "service": {
            "name": config.SERVICE_NAME,
            "service_account": service_account["name"],
            "service_account_secret": service_account["secret"],
            "security": security_settings,
        }
    }

    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
    try:
        sdk_install.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options=service_kerberos_options,
            timeout_seconds=30 * 60,
        )
        yield dict(service_kerberos_options, package_name=config.PACKAGE_NAME)
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def test_mesos_v0_api():
    """Exercise installing Elastic with the Mesos V0 API, then restore V1.

    Bug fix: `foldered_name` was previously assigned inside the `try` block;
    if `sdk_utils.get_foldered_name` raised, the `finally` block referenced an
    unbound name and raised NameError, masking the original failure. The
    assignment now happens before the `try`.
    """
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    try:
        # Install Elastic using the v0 api.
        # Then, clean up afterwards.
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
        sdk_install.install(
            config.PACKAGE_NAME,
            foldered_name,
            current_expected_task_count,
            additional_options={"service": {"name": foldered_name, "mesos_api_version": "V0"}}
        )
        sdk_tasks.check_running(foldered_name, current_expected_task_count)
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
        # reinstall the v1 version for the following tests
        sdk_install.install(
            config.PACKAGE_NAME,
            foldered_name,
            config.DEFAULT_TASK_COUNT,
            additional_options={"service": {"name": foldered_name}})
def hello_world_service(service_account):
    """Install hello-world with TLS under the given service account.

    Yields the service account name. On teardown the service is uninstalled
    and the secrets store is checked for leftover TLS artifacts.
    """
    tls_options = {
        "service": {
            "spec_file": "examples/tls.yml",
            "secret_name": service_account,
            "principal": service_account,
        },
        "tls": {
            "discovery_task_prefix": DISCOVERY_TASK_PREFIX,
        },
    }
    sdk_install.install(
        PACKAGE_NAME,
        1,
        service_name=service_account,
        additional_options=tls_options)

    sdk_plan.wait_for_completed_deployment(PACKAGE_NAME)

    # Wait for service health check to pass
    shakedown.service_healthy(PACKAGE_NAME)
    # TODO(mh): Add proper wait for health check
    time.sleep(15)

    yield service_account

    sdk_install.uninstall(PACKAGE_NAME)

    # Make sure that all the TLS artifacts were removed from the secrets store.
    output = sdk_cmd.run_cli(
        'security secrets list {name}'.format(name=PACKAGE_NAME))
    for suffix in ('certificate', 'private-key', 'root-ca-certificate',
                   'keystore', 'truststore'):
        assert suffix not in output
def setup_constraint_switch():
    """Install one 'hello' pod pinned to a specific agent, then switch its
    placement constraint to a different agent via a Marathon config update.

    Returns:
        (original_agent, new_agent, original_hello_task_ids)
    """
    sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)

    some_agent, other_agent = shakedown.get_private_agents()[:2]
    log.info('Agents: %s %s', some_agent, other_agent)
    assert some_agent != other_agent

    options = _escape_placement_for_1_9({
        "service": {
            "yaml": "marathon_constraint"
        },
        "hello": {
            "count": 1,
            # First, we stick the pod to some_agent
            "placement": "[[\"hostname\", \"LIKE\", \"{}\"]]".format(some_agent)
        },
        "world": {
            "count": 0
        }
    })
    sdk_install.install(
        config.PACKAGE_NAME, config.SERVICE_NAME, 1, additional_options=options)
    sdk_tasks.check_running(config.SERVICE_NAME, 1)
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello')

    # Now, stick it to other_agent
    marathon_config = sdk_marathon.get_config(config.SERVICE_NAME)
    marathon_config['env']['HELLO_PLACEMENT'] = \
        "[[\"hostname\", \"LIKE\", \"{}\"]]".format(other_agent)
    sdk_marathon.update_app(config.SERVICE_NAME, marathon_config)
    # Wait for the scheduler to be up and settled before advancing.
    sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME)

    return some_agent, other_agent, hello_ids
def uninstall_packages(configure_security: None) -> Iterator[None]:
    """Guarantee Elastic and Kibana are absent both before and after the
    test session.

    NOTE(review): relies on a module-level `foldered_name` defined elsewhere
    in this file.
    """
    def _remove_both() -> None:
        # Kibana installs under its own package name as the service name.
        sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)

    try:
        log.info(
            "Ensuring Elastic and Kibana are uninstalled before running test")
        _remove_both()

        yield  # let the test session execute
    finally:
        log.info(
            "Ensuring Elastic and Kibana are uninstalled after running test")
        _remove_both()