def decommission_agent(agent_id: str) -> None:
    """Decommission the given Mesos agent via the DC/OS CLI.

    Only supported on DC/OS 1.11+, where `dcos node decommission` was added;
    asserts on older clusters and on a non-zero CLI exit code.
    """
    version_ok = sdk_utils.dcos_version_at_least("1.11")
    assert version_ok, "node decommission is supported in DC/OS 1.11 and above only"
    exit_code, _, _ = sdk_cmd.run_cli("node decommission {}".format(agent_id))
    assert exit_code == 0
def decommission_agent(agent_id: str) -> None:
    """Run `dcos node decommission` for the given agent (requires DC/OS 1.11+)."""
    assert sdk_utils.dcos_version_at_least(
        "1.11"
    ), "node decommission is supported in DC/OS 1.11 and above only"
    command = "node decommission {}".format(agent_id)
    returncode, _, _ = sdk_cmd.run_cli(command)
    assert returncode == 0
def _escape_placement_for_1_9(options: dict) -> dict:
    """Backslash-escape double quotes in placement constraints for DC/OS 1.9.

    DC/OS 1.9 requires double quotes in submitted placement constraints to be
    backslash-escaped; 1.10 and newer accept them unmodified, so the options
    are returned untouched there. Mutates and returns `options`.
    """
    log.info(options)
    if sdk_utils.dcos_version_at_least("1.10"):
        log.info("DC/OS version >= 1.10")
        return options

    def escape_section_placement(section: str, options: dict) -> dict:
        # Only rewrite sections that actually carry a placement constraint.
        if section in options and "placement" in options[section]:
            escaped = options[section]["placement"].replace('"', '\\"')
            options[section]["placement"] = escaped
            log.info("Escaping %s", section)
            log.info(options)
        return options

    # Apply the escaping to both known pod sections.
    return escape_section_placement("hello", escape_section_placement("world", options))
def _escape_placement_for_1_9(options: dict) -> dict:
    """On DC/OS 1.9, escape double quotes in placement constraints in-place.

    1.9 requires quotes to be backslash-escaped when submitting placement
    constraints; on 1.10+ the options are returned unchanged.
    """
    log.info(options)
    if sdk_utils.dcos_version_at_least("1.10"):
        # Nothing to do on modern clusters.
        log.info("DC/OS version >= 1.10")
        return options

    def escape_section_placement(section: str, options: dict) -> dict:
        if section in options and "placement" in options[section]:
            placement = options[section]["placement"]
            options[section]["placement"] = placement.replace("\"", "\\\"")
            log.info("Escaping %s", section)
            log.info(options)
        return options

    escaped_once = escape_section_placement("world", options)
    return escape_section_placement("hello", escaped_once)
@pytest.mark.sanity
def test_pod_replace(kafka_client: client.KafkaClient):
    # Replace one broker pod, then confirm the client can still reach the
    # full expected broker set afterwards.
    test_utils.replace_broker_pod(config.PACKAGE_NAME, FOLDERED_NAME, config.DEFAULT_POD_TYPE, config.DEFAULT_BROKER_COUNT)
    kafka_client.connect(config.DEFAULT_BROKER_COUNT)


# --------- CLI -------------


@pytest.mark.sanity
@pytest.mark.metrics
@pytest.mark.dcos_min_version("1.9")
@pytest.mark.skipif(
    sdk_utils.dcos_version_at_least("1.12"),
    reason="Metrics are not working on 1.12. Reenable once this is fixed",
)
def test_metrics():
    # A representative sample of broker metrics expected from the DC/OS
    # metrics pipeline (network, socket-server, controller).
    expected_metrics = [
        "kafka.network.RequestMetrics.ResponseQueueTimeMs.max",
        "kafka.socket-server-metrics.io-ratio",
        "kafka.controller.ControllerStats.LeaderElectionRateAndTimeMs.p95",
    ]

    def expected_metrics_exist(emitted_metrics):
        # Predicate passed to the waiter: all expected metrics must be present.
        return sdk_metrics.check_metrics_presence(emitted_metrics, expected_metrics)

    # NOTE(review): this call is truncated in this chunk; remaining arguments
    # continue beyond the visible source.
    sdk_metrics.wait_for_service_metrics(
        config.PACKAGE_NAME,
def is_cli_supports_service_options_update() -> bool:
    """Service updates are supported in [EE 1.9+] or [Open 1.11+]"""
    if not sdk_utils.dcos_version_at_least("1.9"):
        return False
    if sdk_utils.is_open_dcos():
        # Open DC/OS gained service option updates later than Enterprise.
        return sdk_utils.dcos_version_at_least("1.11")
    return True
def test_pod_pause_resume():
    '''Tests pausing and resuming a pod. Similar to pod restart, except the task is marked with a PAUSED state'''

    # get current agent id:
    taskinfo = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-0', json=True, print_output=False)[0]['info']
    old_agent = taskinfo['slaveId']['value']
    old_cmd = taskinfo['command']['value']

    # sanity check of pod status/plan status before we pause/resume:
    jsonobj = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod status hello-0 --json', json=True)
    assert len(jsonobj['tasks']) == 1
    assert jsonobj['tasks'][0]['name'] == 'hello-0-server'
    assert jsonobj['tasks'][0]['status'] == 'RUNNING'
    phase = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'plan status deploy --json', json=True)['phases'][0]
    assert phase['name'] == 'hello'
    assert phase['status'] == 'COMPLETE'
    assert phase['steps'][0]['name'] == 'hello-0:[server]'
    assert phase['steps'][0]['status'] == 'COMPLETE'

    # pause the pod, wait for it to relaunch
    # (task IDs are captured first so the relaunch can be detected as an ID change)
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello-0')
    jsonobj = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'debug pod pause hello-0', json=True)
    assert len(jsonobj) == 2
    assert jsonobj['pod'] == 'hello-0'
    assert len(jsonobj['tasks']) == 1
    assert jsonobj['tasks'][0] == 'hello-0-server'
    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0', hello_ids)
    config.check_running()

    # check agent didn't move, and that the command has changed:
    jsonobj = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-0', json=True, print_output=False)[0]['info']
    assert old_agent == jsonobj['slaveId']['value']
    cmd = jsonobj['command']['value']
    assert 'This task is PAUSED' in cmd

    if sdk_utils.dcos_version_at_least('1.10'):
        # validate readiness check (default executor)
        readiness_check = jsonobj['check']['command']['command']['value']
        assert 'exit 1' == readiness_check

    # check PAUSED state in plan and in pod status:
    jsonobj = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod status hello-0 --json', json=True)
    assert len(jsonobj['tasks']) == 1
    assert jsonobj['tasks'][0]['name'] == 'hello-0-server'
    assert jsonobj['tasks'][0]['status'] == 'PAUSED'
    phase = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'plan status deploy --json', json=True)['phases'][0]
    assert phase['name'] == 'hello'
    assert phase['status'] == 'COMPLETE'
    assert phase['steps'][0]['name'] == 'hello-0:[server]'
    assert phase['steps'][0]['status'] == 'PAUSED'

    # resume the pod again, wait for it to relaunch
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello-0')
    jsonobj = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'debug pod resume hello-0', json=True)
    assert len(jsonobj) == 2
    assert jsonobj['pod'] == 'hello-0'
    assert len(jsonobj['tasks']) == 1
    assert jsonobj['tasks'][0] == 'hello-0-server'
    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0', hello_ids)
    config.check_running()

    # check again that the agent didn't move:
    taskinfo = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-0', json=True, print_output=False)[0]['info']
    assert old_agent == taskinfo['slaveId']['value']
    assert old_cmd == taskinfo['command']['value']

    # check that the pod/plan status is back to normal:
    jsonobj = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod status hello-0 --json', json=True)
    assert len(jsonobj['tasks']) == 1
    assert jsonobj['tasks'][0]['name'] == 'hello-0-server'
    assert jsonobj['tasks'][0]['status'] == 'RUNNING'
    phase = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'plan status deploy --json', json=True)['phases'][0]
    assert phase['name'] == 'hello'
    assert phase['status'] == 'COMPLETE'
    assert phase['steps'][0]['name'] == 'hello-0:[server]'
    assert phase['steps'][0]['status'] == 'COMPLETE'
def is_cli_supports_service_options_update() -> bool:
    """Service updates are supported in [EE 1.9+] or [Open 1.11+]"""
    # Preserve short-circuiting: only probe the Open/Enterprise flavor once
    # the 1.9 baseline is known to hold.
    if sdk_utils.dcos_version_at_least("1.9"):
        return not sdk_utils.is_open_dcos() or sdk_utils.dcos_version_at_least("1.11")
    return False
def test_pod_pause_resume():
    '''Tests pausing and resuming a pod. Similar to pod restart, except the task is marked with a PAUSED state'''

    # get current agent id:
    # (agent and command are recorded so we can verify pause/resume keeps the
    # pod on the same agent while swapping the launched command)
    taskinfo = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-0', json=True, print_output=False)[0]['info']
    old_agent = taskinfo['slaveId']['value']
    old_cmd = taskinfo['command']['value']

    # sanity check of pod status/plan status before we pause/resume:
    jsonobj = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod status hello-0 --json', json=True)
    assert len(jsonobj['tasks']) == 1
    assert jsonobj['tasks'][0]['name'] == 'hello-0-server'
    assert jsonobj['tasks'][0]['status'] == 'RUNNING'
    phase = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'plan status deploy --json', json=True)['phases'][0]
    assert phase['name'] == 'hello'
    assert phase['status'] == 'COMPLETE'
    assert phase['steps'][0]['name'] == 'hello-0:[server]'
    assert phase['steps'][0]['status'] == 'COMPLETE'

    # pause the pod, wait for it to relaunch
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello-0')
    jsonobj = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'debug pod pause hello-0', json=True)
    assert len(jsonobj) == 2
    assert jsonobj['pod'] == 'hello-0'
    assert len(jsonobj['tasks']) == 1
    assert jsonobj['tasks'][0] == 'hello-0-server'
    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0', hello_ids)
    config.check_running()

    # check agent didn't move, and that the command has changed:
    jsonobj = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-0', json=True, print_output=False)[0]['info']
    assert old_agent == jsonobj['slaveId']['value']
    cmd = jsonobj['command']['value']
    assert 'This task is PAUSED' in cmd

    if sdk_utils.dcos_version_at_least('1.10'):
        # validate readiness check (default executor)
        readiness_check = jsonobj['check']['command']['command']['value']
        assert 'exit 1' == readiness_check

    # check PAUSED state in plan and in pod status:
    jsonobj = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod status hello-0 --json', json=True)
    assert len(jsonobj['tasks']) == 1
    assert jsonobj['tasks'][0]['name'] == 'hello-0-server'
    assert jsonobj['tasks'][0]['status'] == 'PAUSED'
    phase = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'plan status deploy --json', json=True)['phases'][0]
    assert phase['name'] == 'hello'
    assert phase['status'] == 'COMPLETE'
    assert phase['steps'][0]['name'] == 'hello-0:[server]'
    assert phase['steps'][0]['status'] == 'PAUSED'

    # resume the pod again, wait for it to relaunch
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'hello-0')
    jsonobj = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'debug pod resume hello-0', json=True)
    assert len(jsonobj) == 2
    assert jsonobj['pod'] == 'hello-0'
    assert len(jsonobj['tasks']) == 1
    assert jsonobj['tasks'][0] == 'hello-0-server'
    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, 'hello-0', hello_ids)
    config.check_running()

    # check again that the agent didn't move:
    taskinfo = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod info hello-0', json=True, print_output=False)[0]['info']
    assert old_agent == taskinfo['slaveId']['value']
    assert old_cmd == taskinfo['command']['value']

    # check that the pod/plan status is back to normal:
    jsonobj = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'pod status hello-0 --json', json=True)
    assert len(jsonobj['tasks']) == 1
    assert jsonobj['tasks'][0]['name'] == 'hello-0-server'
    assert jsonobj['tasks'][0]['status'] == 'RUNNING'
    phase = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, 'plan status deploy --json', json=True)['phases'][0]
    assert phase['name'] == 'hello'
    assert phase['status'] == 'COMPLETE'
    assert phase['steps'][0]['name'] == 'hello-0:[server]'
    assert phase['steps'][0]['status'] == 'COMPLETE'
def test_pod_pause_resume():
    """Tests pausing and resuming a pod. Similar to pod restart, except the task is marked with a PAUSED state"""

    # get current agent id:
    # (agent and command are recorded so pause/resume can be checked to keep
    # the pod on the same agent while swapping the launched command)
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "pod info hello-0", print_output=False)
    assert rc == 0, "Pod info failed"
    taskinfo = json.loads(stdout)[0]["info"]
    old_agent = taskinfo["slaveId"]["value"]
    old_cmd = taskinfo["command"]["value"]

    # sanity check of pod status/plan status before we pause/resume:
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "pod status hello-0 --json")
    assert rc == 0, "Pod status failed"
    jsonobj = json.loads(stdout)
    assert len(jsonobj["tasks"]) == 1
    assert jsonobj["tasks"][0]["name"] == "hello-0-server"
    assert jsonobj["tasks"][0]["status"] == "RUNNING"
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "plan status deploy --json")
    assert rc == 0, "Plan status failed"
    phase = json.loads(stdout)["phases"][0]
    assert phase["name"] == "hello"
    assert phase["status"] == "COMPLETE"
    assert phase["steps"][0]["name"] == "hello-0:[server]"
    assert phase["steps"][0]["status"] == "COMPLETE"

    # pause the pod, wait for it to relaunch
    # (task IDs captured first so the relaunch can be detected as an ID change)
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, "hello-0")
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "debug pod pause hello-0")
    assert rc == 0, "Pod pause failed"
    jsonobj = json.loads(stdout)
    assert len(jsonobj) == 2
    assert jsonobj["pod"] == "hello-0"
    assert len(jsonobj["tasks"]) == 1
    assert jsonobj["tasks"][0] == "hello-0-server"
    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, "hello-0", hello_ids)
    # recovery will not be completed due to 'exit 1' readiness check on paused pod.
    # it will be IN_PROGRESS if there are other completed recovery operations (prior test cases), or STARTED if there aren't.
    check_healthy(expected_recovery_state=["STARTED", "IN_PROGRESS"])

    # check agent didn't move, and that the command has changed:
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "pod info hello-0", print_output=False)
    assert rc == 0, "Pod info failed"
    taskinfo = json.loads(stdout)[0]["info"]
    assert old_agent == taskinfo["slaveId"]["value"]
    cmd = taskinfo["command"]["value"]
    assert "This task is PAUSED" in cmd

    if sdk_utils.dcos_version_at_least("1.10"):
        # validate readiness check (default executor)
        readiness_check = taskinfo["check"]["command"]["command"]["value"]
        assert "exit 1" == readiness_check

    # check PAUSED state in plan and in pod status:
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "pod status hello-0 --json")
    assert rc == 0, "Pod status failed"
    jsonobj = json.loads(stdout)
    assert len(jsonobj["tasks"]) == 1
    assert jsonobj["tasks"][0]["name"] == "hello-0-server"
    assert jsonobj["tasks"][0]["status"] == "PAUSED"
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "plan status deploy --json")
    assert rc == 0, "Plan status failed"
    phase = json.loads(stdout)["phases"][0]
    assert phase["name"] == "hello"
    assert phase["status"] == "COMPLETE"
    assert phase["steps"][0]["name"] == "hello-0:[server]"
    assert phase["steps"][0]["status"] == "PAUSED"

    # resume the pod again, wait for it to relaunch
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, "hello-0")
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "debug pod resume hello-0")
    assert rc == 0, "Pod resume failed"
    jsonobj = json.loads(stdout)
    assert len(jsonobj) == 2
    assert jsonobj["pod"] == "hello-0"
    assert len(jsonobj["tasks"]) == 1
    assert jsonobj["tasks"][0] == "hello-0-server"
    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, "hello-0", hello_ids)
    check_healthy()

    # check again that the agent didn't move:
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "pod info hello-0", print_output=False)
    assert rc == 0, "Pod info failed"
    taskinfo = json.loads(stdout)[0]["info"]
    assert old_agent == taskinfo["slaveId"]["value"]
    assert old_cmd == taskinfo["command"]["value"]

    # check that the pod/plan status is back to normal:
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "pod status hello-0 --json")
    assert rc == 0, "Pod status failed"
    jsonobj = json.loads(stdout)
    assert len(jsonobj["tasks"]) == 1
    assert jsonobj["tasks"][0]["name"] == "hello-0-server"
    assert jsonobj["tasks"][0]["status"] == "RUNNING"
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "plan status deploy --json")
    assert rc == 0, "Plan status failed"
    phase = json.loads(stdout)["phases"][0]
    assert phase["name"] == "hello"
    assert phase["status"] == "COMPLETE"
    assert phase["steps"][0]["name"] == "hello-0:[server]"
    assert phase["steps"][0]["status"] == "COMPLETE"
}, service_name=config.SERVICE_NAME, https=True)
    # NOTE(review): the lines above are the tail of a call whose start is
    # outside this chunk; presumably a document-creation helper — confirm.
    document = config.get_document(config.DEFAULT_INDEX_NAME, config.DEFAULT_INDEX_TYPE, 1, https=True)
    assert document
    assert document['_source']['name'] == 'Loren'


@pytest.mark.tls
@pytest.mark.sanity
@pytest.mark.skipif(
    sdk_utils.dcos_version_at_least('1.12'),
    reason='MESOS-9008: Mesos Fetcher fails to extract Kibana archive')
def test_kibana_tls(kibana_application):
    # Verify Kibana is reachable through Admin Router at its login endpoint.
    config.check_kibana_adminrouter_integration("service/{}/login".format(
        config.KIBANA_SERVICE_NAME))


@pytest.mark.tls
@pytest.mark.sanity
@pytest.mark.recovery
def test_tls_recovery(elastic_service, service_account):
    # NOTE(review): this test continues beyond the visible source; only the
    # initial pod listing is shown here.
    pod_list = sdk_cmd.svc_cli(elastic_service["package_name"], elastic_service["service"]["name"], "pod list", json=True)
def test_pod_pause_resume():
    """Tests pausing and resuming a pod. Similar to pod restart, except the task is marked with a PAUSED state"""

    # get current agent id:
    # (agent and command are recorded so pause/resume can be checked to keep
    # the pod on the same agent while swapping the launched command)
    rc, stdout, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, "pod info hello-0", print_output=False
    )
    assert rc == 0, "Pod info failed"
    taskinfo = json.loads(stdout)[0]["info"]
    old_agent = taskinfo["slaveId"]["value"]
    old_cmd = taskinfo["command"]["value"]

    # sanity check of pod status/plan status before we pause/resume:
    rc, stdout, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, "pod status hello-0 --json"
    )
    assert rc == 0, "Pod status failed"
    jsonobj = json.loads(stdout)
    assert len(jsonobj["tasks"]) == 1
    assert jsonobj["tasks"][0]["name"] == "hello-0-server"
    assert jsonobj["tasks"][0]["status"] == "RUNNING"
    rc, stdout, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, "plan status deploy --json"
    )
    assert rc == 0, "Plan status failed"
    phase = json.loads(stdout)["phases"][0]
    assert phase["name"] == "hello"
    assert phase["status"] == "COMPLETE"
    assert phase["steps"][0]["name"] == "hello-0:[server]"
    assert phase["steps"][0]["status"] == "COMPLETE"

    # pause the pod, wait for it to relaunch
    # (task IDs captured first so the relaunch can be detected as an ID change)
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, "hello-0")
    rc, stdout, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, "debug pod pause hello-0"
    )
    assert rc == 0, "Pod pause failed"
    jsonobj = json.loads(stdout)
    assert len(jsonobj) == 2
    assert jsonobj["pod"] == "hello-0"
    assert len(jsonobj["tasks"]) == 1
    assert jsonobj["tasks"][0] == "hello-0-server"
    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, "hello-0", hello_ids)
    # recovery will not be completed due to 'exit 1' readiness check on paused pod.
    # it will be IN_PROGRESS if there are other completed recovery operations (prior test cases), or STARTED if there aren't.
    check_healthy(expected_recovery_state=["STARTED", "IN_PROGRESS"])

    # check agent didn't move, and that the command has changed:
    rc, stdout, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, "pod info hello-0", print_output=False
    )
    assert rc == 0, "Pod info failed"
    taskinfo = json.loads(stdout)[0]["info"]
    assert old_agent == taskinfo["slaveId"]["value"]
    cmd = taskinfo["command"]["value"]
    assert "This task is PAUSED" in cmd

    if sdk_utils.dcos_version_at_least("1.10"):
        # validate readiness check (default executor)
        readiness_check = taskinfo["check"]["command"]["command"]["value"]
        assert "exit 1" == readiness_check

    # check PAUSED state in plan and in pod status:
    rc, stdout, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, "pod status hello-0 --json"
    )
    assert rc == 0, "Pod status failed"
    jsonobj = json.loads(stdout)
    assert len(jsonobj["tasks"]) == 1
    assert jsonobj["tasks"][0]["name"] == "hello-0-server"
    assert jsonobj["tasks"][0]["status"] == "PAUSED"
    rc, stdout, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, "plan status deploy --json"
    )
    assert rc == 0, "Plan status failed"
    phase = json.loads(stdout)["phases"][0]
    assert phase["name"] == "hello"
    assert phase["status"] == "COMPLETE"
    assert phase["steps"][0]["name"] == "hello-0:[server]"
    assert phase["steps"][0]["status"] == "PAUSED"

    # resume the pod again, wait for it to relaunch
    hello_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, "hello-0")
    rc, stdout, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, "debug pod resume hello-0"
    )
    assert rc == 0, "Pod resume failed"
    jsonobj = json.loads(stdout)
    assert len(jsonobj) == 2
    assert jsonobj["pod"] == "hello-0"
    assert len(jsonobj["tasks"]) == 1
    assert jsonobj["tasks"][0] == "hello-0-server"
    sdk_tasks.check_tasks_updated(config.SERVICE_NAME, "hello-0", hello_ids)
    check_healthy()

    # check again that the agent didn't move:
    rc, stdout, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, "pod info hello-0",
        print_output=False
    )
    assert rc == 0, "Pod info failed"
    taskinfo = json.loads(stdout)[0]["info"]
    assert old_agent == taskinfo["slaveId"]["value"]
    assert old_cmd == taskinfo["command"]["value"]

    # check that the pod/plan status is back to normal:
    rc, stdout, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, "pod status hello-0 --json"
    )
    assert rc == 0, "Pod status failed"
    jsonobj = json.loads(stdout)
    assert len(jsonobj["tasks"]) == 1
    assert jsonobj["tasks"][0]["name"] == "hello-0-server"
    assert jsonobj["tasks"][0]["status"] == "RUNNING"
    rc, stdout, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, config.SERVICE_NAME, "plan status deploy --json"
    )
    assert rc == 0, "Plan status failed"
    phase = json.loads(stdout)["phases"][0]
    assert phase["name"] == "hello"
    assert phase["status"] == "COMPLETE"
    assert phase["steps"][0]["name"] == "hello-0:[server]"
    assert phase["steps"][0]["status"] == "COMPLETE"
# allocation: # node_initial_primaries_recoveries: 3 # The default value is 4. We're just testing to make sure the YAML formatting survived intact and the setting # got updated in the config. base64_str = 'Y2x1c3RlcjoNCiAgcm91dGluZzoNCiAgICBhbGxvY2F0aW9uOg0KIC' \ 'AgICAgbm9kZV9pbml0aWFsX3ByaW1hcmllc19yZWNvdmVyaWVzOiAz' config.update_app(foldered_name, {'CUSTOM_YAML_BLOCK_BASE64': base64_str}, current_expected_task_count) config.check_custom_elasticsearch_cluster_setting(service_name=foldered_name) sdk_plan.wait_for_completed_deployment(foldered_name) sdk_plan.wait_for_completed_recovery(foldered_name) @pytest.mark.sanity @pytest.mark.timeout(60 * 60) @pytest.mark.skipif(sdk_utils.dcos_version_at_least('1.12'), reason='MESOS-9008: Mesos Fetcher fails to extract Kibana archive') def test_xpack_toggle_with_kibana(default_populated_index): log.info("\n***** Verify X-Pack disabled by default in elasticsearch") config.verify_commercial_api_status(False, service_name=foldered_name) log.info("\n***** Test kibana with X-Pack disabled...") elasticsearch_url = "http://" + sdk_hosts.vip_host(foldered_name, "coordinator", 9200) sdk_install.install( config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME, 0, { "kibana": { "elasticsearch_url": elasticsearch_url }}, timeout_seconds=config.KIBANA_DEFAULT_TIMEOUT,