def _retried_run_janitor(service_name):
    """Run the janitor Docker image on the master node to clean up leftover
    role/principal/ZK state for the given service."""
    token = sdk_cmd.run_cli('config show core.dcos_acs_token', print_output=False).strip()
    janitor_cmd = " ".join([
        "docker", "run", "mesosphere/janitor", "/janitor.py",
        "-r", sdk_utils.get_role(service_name),
        "-p", service_name + '-principal',
        "-z", sdk_utils.get_zk_path(service_name),
        "--auth_token={}".format(token),
    ])
    sdk_cmd.master_ssh(janitor_cmd)
def submit_dispatcher_request(request_filename):
    """Submit a Spark dispatcher create-request (loaded from the resources dir)
    via curl on the master, returning the 'action' field of the response.

    NOTE(review): relies on `ip` and `port` from the enclosing scope — confirm.
    """
    request_path = os.path.join(THIS_DIR, 'resources', request_filename)
    with open(request_path, 'r') as request_file:
        payload = request_file.read()
    curl_command = '''curl -d '{}' -H "Content-Type: application/json" -X POST "http://{}:{}/v1/submissions/create"'''.format(payload, ip, port)
    success, output = sdk_cmd.master_ssh(curl_command)
    assert success
    return json.loads(output)['action']
def run_janitor(service_name, role, service_account, znode):
    """Run the janitor container on the master to remove leftover service state.

    Any of role/service_account/znode may be None, in which case a default
    derived from service_name is substituted.
    """
    if role is None:
        role = sdk_utils.get_deslashed_service_name(service_name) + '-role'
    if service_account is None:
        service_account = service_name + '-principal'
    if znode is None:
        znode = sdk_utils.get_zk_path(service_name)
    auth_token = sdk_cmd.run_cli('config show core.dcos_acs_token', print_output=False).strip()
    janitor_args = [
        "docker", "run", "mesosphere/janitor", "/janitor.py",
        "-r", role,
        "-p", service_account,
        "-z", znode,
        "--auth_token={}".format(auth_token),
    ]
    sdk_cmd.master_ssh(" ".join(janitor_args))
def run_janitor(service_name, role, service_account, znode):
    """Clean up leftover Mesos role / principal / ZK node for a service by
    running the janitor image on the master.

    None arguments are replaced with defaults derived from service_name.
    """
    resolved_role = role if role is not None else \
        sdk_utils.get_deslashed_service_name(service_name) + '-role'
    resolved_account = service_account if service_account is not None else \
        service_name + '-principal'
    resolved_znode = znode if znode is not None else sdk_utils.get_zk_path(service_name)
    auth_token = sdk_cmd.run_cli('config show core.dcos_acs_token', print_output=False).strip()
    cmd = "docker run mesosphere/janitor /janitor.py -r {} -p {} -z {} --auth_token={}".format(
        resolved_role, resolved_account, resolved_znode, auth_token)
    sdk_cmd.master_ssh(cmd)
def call_shakedown():
    """Query the mesos-dns enumerate endpoint on the master and return the
    response parsed as JSON. Logs and re-raises on a parse failure."""
    enumerate_cmd = "curl localhost:8123/v1/enumerate"
    succeeded, raw = sdk_cmd.master_ssh(enumerate_cmd)
    assert succeeded, "Failed to get srv records from master SSH: {}".format(enumerate_cmd)
    try:
        return json.loads(raw)
    except Exception:
        log.exception("Error converting out=%s to json", raw)
        raise
def test_unique_vips():
    """Install two Spark dispatchers under different service groups and verify
    each dispatcher UI is reachable at its own VIP; always uninstall both."""
    spark1_service_name = "test/groupa/spark"
    spark2_service_name = "test/groupb/spark"
    try:
        utils.require_spark(spark1_service_name)
        utils.require_spark(spark2_service_name)
        ui_vip_1 = sdk_hosts.vip_host(
            "marathon", "dispatcher.{}".format(spark1_service_name), 4040)
        ui_vip_2 = sdk_hosts.vip_host(
            "marathon", "dispatcher.{}".format(spark2_service_name), 4040)
        # Each dispatcher UI must answer at its own VIP.
        for ui_vip in (ui_vip_1, ui_vip_2):
            reachable, _ = sdk_cmd.master_ssh("curl {}".format(ui_vip))
            assert reachable
    finally:
        sdk_install.uninstall(utils.SPARK_PACKAGE_NAME, spark1_service_name)
        sdk_install.uninstall(utils.SPARK_PACKAGE_NAME, spark2_service_name)
def call_shakedown():
    """Fetch the mesos-dns SRV enumeration from the master and return it parsed as JSON.

    Raises:
        AssertionError: if the SSH/curl command fails.
        Exception: re-raised (with traceback logged) if the output is not valid JSON.
    """
    cmd = "curl localhost:8123/v1/enumerate"
    is_ok, out = sdk_cmd.master_ssh(cmd)
    assert is_ok, "Failed to get srv records from master SSH: {}".format(cmd)
    try:
        srvs = json.loads(out)
    except Exception:
        # log.exception records the full traceback in one call, and a bare
        # `raise` preserves it for the caller. The previous `log.error(e)` +
        # `raise e` pattern logged only the message and is inconsistent with
        # the sibling implementation of this helper elsewhere in the file.
        log.exception("Error converting out=%s to json", out)
        raise
    return srvs
def get_crypto_id_domain():
    """Return the autoip domain spelled with the cluster's cryptographic ID.

    Equivalent to autoip.dcos.thisdcos.directory: addresses under this domain
    are routable within the cluster, which makes it useful for testing a
    custom service domain.
    """
    rc, stdout, _ = sdk_cmd.master_ssh("curl localhost:62080/lashup/key/")
    assert rc == 0
    key_info = json.loads(stdout.strip())
    return "autoip.dcos.{}.dcos.directory".format(key_info["zbase32_public_key"])
def get_crypto_id_domain():
    """Return the cluster-crypto-ID equivalent of autoip.dcos.thisdcos.directory.

    These addresses are routable within the cluster and can be used to test
    setting a custom service domain.
    """
    ok, lashup_response = sdk_cmd.master_ssh("curl localhost:62080/lashup/key/")
    assert ok
    zbase32_key = json.loads(lashup_response.strip())["zbase32_public_key"]
    return "autoip.dcos.{}.dcos.directory".format(zbase32_key)
def _retried_run_janitor(service_name):
    """Run the janitor image on the master to clear the service's role,
    principal and ZK node, asserting the command succeeded."""
    janitor_cmd = " ".join([
        "docker", "run", "mesosphere/janitor", "/janitor.py",
        "-r", sdk_utils.get_role(service_name),
        "-p", service_name + "-principal",
        "-z", sdk_utils.get_zk_path(service_name),
        "--auth_token={}".format(sdk_utils.dcos_token()),
    ])
    rc, _, _ = sdk_cmd.master_ssh(janitor_cmd)
    assert rc == 0, "Janitor command failed"
def wait_for_valid_srv_records():
    """Fetch SRV records from mesos-dns on the master and assert they match expectations.

    Raises on any failure (bad SSH, bad JSON, mismatched records) rather than
    returning a status; the trailing log-and-reraise suggests this is called
    under a retry wrapper — TODO confirm against the caller.
    """
    cmd = "curl localhost:8123/v1/enumerate"
    rc, stdout, _ = sdk_cmd.master_ssh(cmd)
    assert rc == 0, "Failed to get srv records from master SSH: {}".format(
        cmd)
    try:
        srvs = json.loads(stdout)
    except Exception:
        log.exception("Failed to parse JSON endpoints: %s", stdout)
        raise
    try:
        # find the framework matching our expected name which has one or more tasks.
        # we can end up with "duplicate" frameworks left over from previous tests where the framework didn't successfully unregister.
        # in practice these "duplicate"s will appear as a framework entry with an empty list of tasks.
        framework_srvs = [
            f for f in srvs["frameworks"]
            if f["name"] == config.SERVICE_NAME and len(f["tasks"]) > 0
        ]
        assert len(
            framework_srvs
        ) == 1, "Expected exactly one entry for service {}: {}".format(
            config.SERVICE_NAME, framework_srvs)
        framework_srv = framework_srvs[0]
        assert "tasks" in framework_srv, "Framework SRV records missing 'tasks': {}".format(
            framework_srv)
        # Mapping of task_name => [srv_name_1, srv_name_2, ...]
        task_to_srv_names = {}
        for t in framework_srv["tasks"]:
            # A task name appearing twice means the records are inconsistent; fail hard.
            if t["name"] in task_to_srv_names:
                assert False, "Got multiple entries for task {}: {}".format(
                    t["name"], framework_srv)
            task_to_srv_names[t["name"]] = [
                r["name"] for r in t["records"]
            ]
        check_expected_srv_records(task_to_srv_names)
    except Exception:
        # Log the assert message before retrying (or giving up)
        log.exception("SRV record validation failed, trying again...")
        raise
def wait_for_valid_srv_records():
    """Fetch SRV records from mesos-dns on the master and assert they match expectations.

    Raises on any failure (bad SSH, bad JSON, mismatched records); the
    log-and-reraise at the end suggests an outer retry loop re-invokes this
    function — TODO confirm against the caller.
    """
    cmd = "curl localhost:8123/v1/enumerate"
    rc, stdout, _ = sdk_cmd.master_ssh(cmd)
    assert rc == 0, "Failed to get srv records from master SSH: {}".format(cmd)
    try:
        srvs = json.loads(stdout)
    except Exception:
        log.exception("Failed to parse JSON endpoints: %s", stdout)
        raise
    try:
        # find the framework matching our expected name which has one or more tasks.
        # we can end up with "duplicate" frameworks left over from previous tests where the framework didn't successfully unregister.
        # in practice these "duplicate"s will appear as a framework entry with an empty list of tasks.
        framework_srvs = [
            f for f in srvs["frameworks"]
            if f["name"] == config.SERVICE_NAME and len(f["tasks"]) > 0
        ]
        assert len(framework_srvs) == 1, "Expected exactly one entry for service {}: {}".format(
            config.SERVICE_NAME, framework_srvs
        )
        framework_srv = framework_srvs[0]
        assert "tasks" in framework_srv, "Framework SRV records missing 'tasks': {}".format(
            framework_srv
        )
        # Mapping of task_name => [srv_name_1, srv_name_2, ...]
        task_to_srv_names = {}
        for t in framework_srv["tasks"]:
            # A task name appearing twice means the records are inconsistent; fail hard.
            if t["name"] in task_to_srv_names:
                assert False, "Got multiple entries for task {}: {}".format(t["name"], framework_srv)
            task_to_srv_names[t["name"]] = [r["name"] for r in t["records"]]
        check_expected_srv_records(task_to_srv_names)
    except Exception:
        # Log the assert message before retrying (or giving up)
        log.exception("SRV record validation failed, trying again...")
        raise
def check_kibana_adminrouter_integration(path):
    """Probe `path` through adminrouter with an authenticated HEAD request and
    report whether the response line contains HTTP/1.1 200."""
    base_url = shakedown.dcos_url().rstrip('/')
    curl_cmd = "curl -I -k -H \"Authorization: token={}\" -s {}/{}".format(
        shakedown.dcos_acs_token(), base_url, path.lstrip('/'))
    exit_ok, output = sdk_cmd.master_ssh(curl_cmd)
    return exit_ok and output and "HTTP/1.1 200" in output
def check_kibana_adminrouter_integration(path: str) -> bool:
    """Return True when an authenticated HEAD request (following redirects) to
    `path` via adminrouter reports HTTP/1.1 200."""
    target_url = "{}/{}".format(sdk_utils.dcos_url().rstrip("/"), path.lstrip("/"))
    curl_cmd = 'curl -L -I -k -H "Authorization: token={}" -s {}'.format(
        sdk_utils.dcos_token(), target_url)
    rc, stdout, _ = sdk_cmd.master_ssh(curl_cmd)
    return bool(rc == 0 and stdout and "HTTP/1.1 200" in stdout)
def fn():
    """Return truthy when the HTTPS status-code probe against `host` (from the
    enclosing scope) succeeds and the reported code is '200'."""
    succeeded, reported_code = sdk_cmd.master_ssh(_curl_https_get_code(host))
    return succeeded and reported_code == '200'
def fn():
    # Run the externally-defined `full_command` on the master node and pass
    # sdk_cmd.master_ssh's return value through unchanged.
    return sdk_cmd.master_ssh(full_command)
def _check_proxy_was_used() -> None:
    """Assert that the py_proxy container actually proxied S3 traffic, by
    grepping its logs for the S3 hostname.

    Raises:
        AssertionError: when the grep fails or the expected host is absent.
    """
    # grep exits non-zero when nothing matches, so rc covers the common failure;
    # the substring check additionally guards against unexpected grep output.
    rc, stdout, _ = sdk_cmd.master_ssh(
        "sudo docker logs py_proxy 2>&1 | grep 's3.amazonaws.com'")
    # Original had an unused `stderr` binding and a bare assert with no
    # diagnostic message; add one so a failure shows what was actually logged.
    assert rc == 0 and "s3.amazonaws.com" in stdout, \
        "py_proxy logs did not show s3.amazonaws.com traffic (rc={}): {}".format(rc, stdout)
def _check_proxy_healthy(host: str, port: int, uri: str) -> None:
    """Assert that fetching `uri` through the proxy at host:port returns HTTP 200.

    Uses curl's -w '%{http_code}' to print only the status code (body discarded
    via -o /dev/null).

    Raises:
        AssertionError: when curl fails or the reported status code is not 200.
    """
    rc, stdout, _ = sdk_cmd.master_ssh(
        "curl -so /dev/null -w {} --proxy {}:{} {}".format(
            "'%{http_code}'", host, port, uri))
    # Original had an unused `stderr` binding and a bare assert; include the
    # observed rc/status in the failure message for easier debugging.
    assert rc == 0 and stdout == "200", \
        "Proxy health check failed (rc={}, status={})".format(rc, stdout)
def _uninstall_and_kill_proxy_before_install() -> None:
    """Best-effort teardown before install: stop the py_proxy container and
    remove its image.

    The SSH result is intentionally ignored — the container/image may not
    exist yet on a fresh cluster.
    """
    teardown_cmd = (
        "sudo docker stop py_proxy ; "
        "sudo docker rmi mesosphere/proxy.py:a0021cdb3ab913495b8da53c8dc1081b895f3ef2"
    )
    sdk_cmd.master_ssh(teardown_cmd)
def _uninstall_and_kill_proxy() -> None:
    """Stop the py_proxy container and remove its image, asserting that the
    teardown command succeeded."""
    teardown_cmd = (
        "sudo docker stop py_proxy ; "
        "sudo docker rmi mesosphere/proxy.py:a0021cdb3ab913495b8da53c8dc1081b895f3ef2"
    )
    rc, _, _ = sdk_cmd.master_ssh(teardown_cmd)
    assert rc == 0
def verify_ip_is_reachable(ip):
    """Assert that `ip` responds to a curl issued from the master node."""
    reachable, _ = sdk_cmd.master_ssh("curl -v {}".format(ip))
    assert reachable
def _install_and_run_proxy(host: str, port: int) -> None:
    """Start the py_proxy container (detached, host networking) on the master,
    pointed at host:port, and assert the docker run succeeded."""
    run_cmd = (
        "sudo docker run --name py_proxy --rm -d --net=host "
        "mesosphere/proxy.py:a0021cdb3ab913495b8da53c8dc1081b895f3ef2 "
        "--hostname={} --port={}".format(host, port)
    )
    rc, _, _ = sdk_cmd.master_ssh(run_cmd)
    assert rc == 0
def check_kibana_adminrouter_integration(path: str) -> bool:
    """Return True when an authenticated GET (headers only, redirects followed)
    to `path` via adminrouter reports HTTP/1.1 200."""
    endpoint = "{}/{}".format(sdk_utils.dcos_url().rstrip("/"), path.lstrip("/"))
    curl_cmd = 'curl -L -I -k -H "Authorization: token={}" -s -X GET {}'.format(
        sdk_utils.dcos_token(), endpoint)
    rc, stdout, _ = sdk_cmd.master_ssh(curl_cmd)
    return bool(rc == 0 and stdout and "HTTP/1.1 200" in stdout)
def fn():
    """Return True when the HTTPS status-code probe against `host` (from the
    enclosing scope) exits cleanly and reports status 200."""
    exit_code, body, _ = sdk_cmd.master_ssh(_curl_https_get_code(host))
    if exit_code != 0:
        return False
    return body == "200"