def test_xpack_toggle_with_kibana(default_populated_index):
    """Toggle X-Pack on/off in elasticsearch, verifying kibana in both modes and
    that a document written while X-Pack is enabled stays readable after it is
    disabled again.
    """
    log.info("\n***** Verify X-Pack disabled by default in elasticsearch")
    config.verify_commercial_api_status(
        False, service_name=FOLDERED_SERVICE_NAME)

    log.info("\n***** Test kibana with X-Pack disabled...")
    shakedown.install_package(config.KIBANA_PACKAGE_NAME, options_json={
        "kibana": {
            "elasticsearch_url": "http://" + sdk_hosts.vip_host(FOLDERED_SERVICE_NAME, "coordinator", 9200)
        }})
    shakedown.deployment_wait(
        app_id="/{}".format(config.KIBANA_PACKAGE_NAME),
        timeout=config.DEFAULT_KIBANA_TIMEOUT)
    config.check_kibana_adminrouter_integration(
        "service/{}/".format(config.KIBANA_PACKAGE_NAME))
    log.info("Uninstall kibana with X-Pack disabled")
    sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)

    log.info("\n***** Set/verify X-Pack enabled in elasticsearch")
    config.enable_xpack(service_name=FOLDERED_SERVICE_NAME)
    config.verify_commercial_api_status(
        True, service_name=FOLDERED_SERVICE_NAME)
    config.verify_xpack_license(service_name=FOLDERED_SERVICE_NAME)

    log.info("\n***** Write some data while enabled, disable X-Pack, and verify we can still read what we wrote.")
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        2,
        {"name": "X-Pack", "role": "commercial plugin"},
        service_name=FOLDERED_SERVICE_NAME)

    log.info("\n***** Test kibana with X-Pack enabled...")
    shakedown.install_package(config.KIBANA_PACKAGE_NAME, options_json={
        "kibana": {
            "elasticsearch_url": "http://" + sdk_hosts.vip_host(FOLDERED_SERVICE_NAME, "coordinator", 9200),
            "xpack_enabled": True
        }})
    log.info("\n***** Installing Kibana w/X-Pack can take as much as 15 minutes for Marathon deployment ")
    # Fix: the original source had a literal line break inside this string,
    # which is a syntax error (unterminated literal); it is now one literal.
    log.info("to complete due to a configured HTTP health check. (typical: 12 minutes)")
    shakedown.deployment_wait(
        app_id="/{}".format(config.KIBANA_PACKAGE_NAME),
        timeout=config.DEFAULT_KIBANA_TIMEOUT)
    config.check_kibana_adminrouter_integration(
        "service/{}/login".format(config.KIBANA_PACKAGE_NAME))
    log.info("\n***** Uninstall kibana with X-Pack enabled")
    sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)

    log.info("\n***** Disable X-Pack in elasticsearch.")
    config.disable_xpack(service_name=FOLDERED_SERVICE_NAME)
    log.info("\n***** Verify we can still read what we wrote when X-Pack was enabled.")
    config.verify_commercial_api_status(
        False, service_name=FOLDERED_SERVICE_NAME)
    doc = config.get_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        2,
        service_name=FOLDERED_SERVICE_NAME)
    assert doc["_source"]["name"] == "X-Pack"
def test_xpack_toggle_with_kibana(default_populated_index):
    """Toggle X-Pack on/off in elasticsearch, verifying kibana in both modes and
    that a document written while X-Pack is enabled stays readable after it is
    disabled again; finally restore the serial upgrade strategy.
    """
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)

    log.info("\n***** Verify X-Pack disabled by default in elasticsearch")
    config.verify_commercial_api_status(False, service_name=foldered_name)

    log.info("\n***** Test kibana with X-Pack disabled...")
    shakedown.install_package(config.KIBANA_PACKAGE_NAME, options_json={
        "kibana": {"elasticsearch_url": "http://" + sdk_hosts.vip_host(foldered_name, "coordinator", 9200)}})
    shakedown.deployment_wait(
        app_id="/{}".format(config.KIBANA_PACKAGE_NAME),
        timeout=config.DEFAULT_KIBANA_TIMEOUT)
    config.check_kibana_adminrouter_integration(
        "service/{}/".format(config.KIBANA_PACKAGE_NAME))
    log.info("Uninstall kibana with X-Pack disabled")
    sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)

    log.info("\n***** Set/verify X-Pack enabled in elasticsearch. Requires parallel upgrade strategy for full restart.")
    config.enable_xpack(service_name=foldered_name)
    config.verify_commercial_api_status(True, service_name=foldered_name)
    config.verify_xpack_license(service_name=foldered_name)

    log.info("\n***** Write some data while enabled, disable X-Pack, and verify we can still read what we wrote.")
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        2,
        {"name": "X-Pack", "role": "commercial plugin"},
        service_name=foldered_name)

    log.info("\n***** Test kibana with X-Pack enabled...")
    shakedown.install_package(config.KIBANA_PACKAGE_NAME, options_json={
        "kibana": {
            "elasticsearch_url": "http://" + sdk_hosts.vip_host(foldered_name, "coordinator", 9200),
            "xpack_enabled": True
        }})
    log.info("\n***** Installing Kibana w/X-Pack can take as much as 15 minutes for Marathon deployment ")
    # Fix: the original source had a literal line break inside this string,
    # which is a syntax error (unterminated literal); it is now one literal.
    log.info("to complete due to a configured HTTP health check. (typical: 12 minutes)")
    shakedown.deployment_wait(
        app_id="/{}".format(config.KIBANA_PACKAGE_NAME),
        timeout=config.DEFAULT_KIBANA_TIMEOUT)
    config.check_kibana_adminrouter_integration(
        "service/{}/login".format(config.KIBANA_PACKAGE_NAME))
    log.info("\n***** Uninstall kibana with X-Pack enabled")
    sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)

    log.info("\n***** Disable X-Pack in elasticsearch.")
    config.disable_xpack(service_name=foldered_name)
    log.info("\n***** Verify we can still read what we wrote when X-Pack was enabled.")
    config.verify_commercial_api_status(False, service_name=foldered_name)
    doc = config.get_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        2,
        service_name=foldered_name)
    assert doc["_source"]["name"] == "X-Pack"

    # reset upgrade strategy to serial
    # NOTE(review): `current_expected_task_count` is presumably defined at
    # module scope — confirm against the full file.
    config.update_app(foldered_name, {'UPDATE_STRATEGY': 'serial'}, current_expected_task_count)
def test_config_with_custom_yml(configure_package) -> None:
    """Install elastic plus kibana with a base64-encoded custom kibana.yml
    snippet, then verify the snippet shows up in the rendered kibana.yml."""
    sdk_install.install(
        config.PACKAGE_NAME,
        service_name=config.SERVICE_NAME,
        expected_running_tasks=config.DEFAULT_TASK_COUNT,
    )

    # The option takes the yml fragment base64-encoded; keep the decoded form
    # around so we can grep for it in the deployed config file.
    yml_entry = "logging.json: true"
    encoded_yml = base64.b64encode(yml_entry.encode("utf-8")).decode("utf-8")
    es_url = "http://" + sdk_hosts.vip_host(config.SERVICE_NAME, "coordinator", 9200)

    sdk_install.install(
        config.KIBANA_PACKAGE_NAME,
        config.KIBANA_SERVICE_NAME,
        0,
        {"kibana": {"elasticsearch_url": es_url, "custom_kibana_yml": encoded_yml}},
        wait_for_deployment=False,
        insert_strict_options=False,
    )

    grep_cmd = "bash -c 'grep \"{}\" kibana-*/config/kibana.yml'".format(yml_entry)
    rc, stdout, stderr = sdk_cmd.marathon_task_exec(config.KIBANA_SERVICE_NAME, grep_cmd)
    assert rc == 0 and yml_entry in stdout

    config.check_kibana_adminrouter_integration("service/{}/".format(config.KIBANA_SERVICE_NAME))
def test_java_keystore(hello_world_service):
    """Java `keystore-app` presents itself with the TLS certificate provided
    via keystore; verify it from the artifacts container with curl."""
    artifacts_task = sdk_tasks.get_task_ids(PACKAGE_NAME, 'artifacts')[0]
    assert artifacts_task

    # Make a curl request from artifacts container to `keystore-app`
    # and make sure that mesos curl can verify certificate served by app
    endpoint = sdk_hosts.vip_host(PACKAGE_NAME, KEYSTORE_TASK_HTTPS_PORT_NAME)
    curl = 'curl -v -i --cacert secure-tls-pod.ca https://' + endpoint + '/hello-world'
    output = task_exec(artifacts_task, curl, return_stderr_in_stdout=True)

    # Successful request: HTTP 200, curl used the pre-configured CA cert, and
    # the task was matched by the SAN in the certificate.
    assert 'HTTP/1.1 200 OK' in output
    assert 'CAfile: secure-tls-pod.ca' in output
    tls_verification_msg = (
        'host "keystore-https.hello-world.l4lb.thisdcos.directory" matched '
        'cert\'s "keystore-https.hello-world.l4lb.thisdcos.directory"'
    )
    assert tls_verification_msg in output
def get_service_principals(service_name: str, realm: str) -> list:
    """Build the full list of Kerberos principals needed for a kerberized
    deployment of HDFS.

    :return: A list of said principals
    """
    primaries = ["hdfs", "HTTP"]
    node_tasks = [
        "name-0-node", "name-0-zkfc",
        "name-1-node", "name-1-zkfc",
        "journal-0-node", "journal-1-node", "journal-2-node",
        "data-0-node", "data-1-node", "data-2-node",
    ]
    instances = [sdk_hosts.autoip_host(service_name, task) for task in node_tasks]

    principals = kerberos.generate_principal_list(primaries, instances, realm)
    # User principals are generated with a [None] instance — presumably meaning
    # "no host component"; confirm against the kerberos helper.
    principals.extend(kerberos.generate_principal_list(USERS, [None], realm))

    # One extra HTTP principal for the Marathon-fronted API VIP.
    api_host = sdk_hosts.vip_host("marathon", ".".join(["api", service_name]))
    # NOTE: `genererate_principal` spelling matches the helper's actual name.
    principals.append(kerberos.genererate_principal("HTTP", api_host, realm))
    return principals
def test_kibana_plugin_installation():
    """Install kibana against elasticsearch with an extra plugin configured and
    verify the plugin gets installed; always uninstall afterwards."""
    try:
        elasticsearch_url = "http://" + sdk_hosts.vip_host(
            service_name, "coordinator", 9200)
        sdk_install.install(
            kibana_package_name,
            kibana_service_name,
            0,
            {
                "kibana": {
                    "plugins": "https://s3.amazonaws.com/downloads.mesosphere.io/infinity-artifacts/elastic/logtrail-7.4.0-0.1.31.zip",
                    "elasticsearch_url": elasticsearch_url,
                }
            },
            timeout_seconds=kibana_timeout,
            wait_for_deployment=False,
            insert_strict_options=False,
        )
        assert config.check_kibana_plugin_installed("logtrail", kibana_service_name)
    except Exception:
        # Fix: the original called `log.exception(Exception)` (logging the class
        # object, not a message) and swallowed the error entirely, so a failed
        # assertion could never fail the test. Log the traceback and re-raise.
        log.exception("Kibana plugin installation test failed")
        raise
    finally:
        log.info("Ensure elasticsearch and kibana are uninstalled.")
        sdk_install.uninstall(kibana_package_name, kibana_package_name)
def test_endpoints_address():
    """Retry until the broker endpoint listing settles, then validate its
    address/dns entries and the VIP."""
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)

    @retrying.retry(wait_fixed=1000, stop_max_delay=120 * 1000, retry_on_result=lambda res: not res)
    def wait():
        _, ret, _ = sdk_cmd.svc_cli(
            config.PACKAGE_NAME,
            foldered_name,
            "endpoints {}".format(config.DEFAULT_TASK_NAME),
            parse_json=True,
        )
        if len(ret["address"]) == config.DEFAULT_BROKER_COUNT:
            return ret
        return False

    endpoints = wait()
    # Fix: the original asserted `len(endpoints) == _something_` where
    # `_something_` was an undefined placeholder (NameError at runtime).
    # Assert the keys we actually rely on instead.
    assert {"address", "dns", "vip"} <= set(endpoints)
    assert len(endpoints["address"]) == config.DEFAULT_BROKER_COUNT
    assert len(endpoints["dns"]) == config.DEFAULT_BROKER_COUNT
    for i in range(len(endpoints["dns"])):
        assert (sdk_hosts.autoip_host(foldered_name, "kafka-{}-broker".format(i))
                in endpoints["dns"][i])
    assert endpoints["vip"] == sdk_hosts.vip_host(foldered_name, "broker", 9092)
def test_endpoints():
    """Check that we can reach the scheduler via admin router and that the
    returned endpoints are sanitized."""
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    for endpoint in config.ENDPOINT_TYPES:
        # TODO: if an endpoint isn't present this call fails w/ the following:
        #   dcos beta-elastic --name=/test/integration/elastic endpoints ingest-http
        #   *** snip ***
        #   Could not reach the service scheduler with name '/test/integration/elastic'.
        #   Did you provide the correct service name? Specify a different name with '--name=<name>'.
        #   Was the service recently installed or updated? It may still be initializing, wait a bit and try again.
        #   *** snip ***
        # Please consider using the CLI endpoints (list) command to determine which endpoints are
        # present rather than a hardcoded set in config.py.
        # Also consider fixing the CLI+API to emit a more meaningful error message: the service is
        # up; the error condition is that the user requested an endpoint that isn't present. That
        # should be a 404 (specific resource not found), not "service is down" or "service name not
        # found".
        # Further, since we expect the endpoints to differ if ingest node count is A) zero or
        # B) a positive integer, we should have a test for each case.
        endpoints = sdk_cmd.svc_cli(
            config.PACKAGE_NAME, foldered_name, 'endpoints {}'.format(endpoint), json=True)
        node_type = endpoint.split('-')[0]  # 'coordinator-http' => 'coordinator'
        assert endpoints['dns'][0].startswith(
            sdk_hosts.autoip_host(foldered_name, node_type + '-0-node'))
        assert endpoints['vip'].startswith(
            sdk_hosts.vip_host(foldered_name, node_type))
def test_java_keystore():
    """Java `keystore-app` presents itself with the TLS certificate provided
    via keystore."""
    # Request the app from the artifacts container; mesos curl must be able to
    # verify the certificate served by the app.
    target = sdk_hosts.vip_host(config.SERVICE_NAME, KEYSTORE_TASK_HTTPS_PORT_NAME)
    curl = 'curl -v -i --cacert secure-tls-pod.ca https://' + target + '/hello-world'
    _, _, stderr = sdk_cmd.service_task_exec(config.SERVICE_NAME, 'artifacts-0-node', curl)

    # HTTP 200 plus evidence that the pre-configured CA was used and that the
    # task was matched by the SAN in the certificate.
    assert 'HTTP/1.1 200 OK' in stderr
    assert 'CAfile: secure-tls-pod.ca' in stderr
    tls_verification_msg = (
        'host "keystore-https.hello-world.l4lb.thisdcos.directory" matched '
        'cert\'s "keystore-https.hello-world.l4lb.thisdcos.directory"')
    assert tls_verification_msg in stderr
def kibana_application(elastic_service):
    """Fixture: install kibana (TLS + X-Pack) against the elastic coordinator
    VIP, yield for the test, then uninstall."""
    try:
        coordinator_vip = sdk_hosts.vip_host(config.SERVICE_NAME, "coordinator", 9200)
        sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_SERVICE_NAME)
        sdk_install.install(
            config.KIBANA_PACKAGE_NAME,
            service_name=config.KIBANA_SERVICE_NAME,
            expected_running_tasks=0,
            additional_options={
                "kibana": {
                    "xpack_enabled": True,
                    "elasticsearch_tls": True,
                    "elasticsearch_url": "https://" + coordinator_vip,
                }
            },
            timeout_seconds=config.KIBANA_DEFAULT_TIMEOUT,
            wait_for_deployment=False,
        )
        yield
    finally:
        sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_SERVICE_NAME)
def get_service_principals(service_name: str, realm: str) -> list:
    """Build the full list of Kerberos principals needed for a kerberized
    deployment of HDFS.

    :return: A list of said principals
    """
    primaries = ["hdfs", "HTTP"]
    node_tasks = [
        "name-0-node", "name-0-zkfc",
        "name-1-node", "name-1-zkfc",
        "journal-0-node", "journal-1-node", "journal-2-node",
        "data-0-node", "data-1-node", "data-2-node",
    ]
    instances = [sdk_hosts.autoip_host(service_name, t) for t in node_tasks]

    principals = kerberos.generate_principal_list(primaries, instances, realm)
    # User principals are generated with a [None] instance — presumably meaning
    # "no host component"; confirm against the kerberos helper.
    principals.extend(kerberos.generate_principal_list(USERS, [None], realm))

    # One extra HTTP principal for the Marathon-fronted API VIP.
    api_host = sdk_hosts.vip_host("marathon", ".".join(["api", service_name]))
    # NOTE: `genererate_principal` spelling matches the helper's actual name.
    principals.append(kerberos.genererate_principal("HTTP", api_host, realm))
    return principals
def test_virtual_network(configure_package) -> None:
    """Install elastic and kibana on a virtual network and verify the CNI
    plugin labels are in effect."""
    sdk_install.install(
        config.PACKAGE_NAME,
        service_name=config.SERVICE_NAME,
        expected_running_tasks=config.DEFAULT_TASK_COUNT,
        additional_options=sdk_networks.ENABLE_VIRTUAL_NETWORKS_OPTIONS,
    )

    es_url = "http://" + sdk_hosts.vip_host(config.SERVICE_NAME, "coordinator", 9200)
    kibana_options = {
        "service": {
            "virtual_network_enabled": True,
            "virtual_network_plugin_labels": _labels_to_config(
                kibana_virtual_network_plugin_labels
            ),
        },
        "kibana": {"elasticsearch_url": es_url},
    }
    sdk_install.install(
        config.KIBANA_PACKAGE_NAME,
        config.KIBANA_SERVICE_NAME,
        0,
        kibana_options,
        wait_for_deployment=False,
        insert_strict_options=False,
    )
    _check_cni_working(kibana_virtual_network_plugin_labels)
def test_java_keystore(hello_world_service):
    """Java `keystore-app` presents itself with the TLS certificate provided
    via keystore; verify it from the artifacts container with curl."""
    artifacts_task = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'artifacts')[0]
    assert artifacts_task

    # Make a curl request from artifacts container to `keystore-app`
    # and make sure that mesos curl can verify certificate served by app
    endpoint = sdk_hosts.vip_host(config.SERVICE_NAME, KEYSTORE_TASK_HTTPS_PORT_NAME)
    curl = 'curl -v -i --cacert secure-tls-pod.ca https://' + endpoint + '/hello-world'
    output = task_exec(artifacts_task, curl, return_stderr_in_stdout=True)

    # Successful request: HTTP 200, curl used the pre-configured CA cert, and
    # the task was matched by the SAN in the certificate.
    assert 'HTTP/1.1 200 OK' in output
    assert 'CAfile: secure-tls-pod.ca' in output
    tls_verification_msg = (
        'host "keystore-https.hello-world.l4lb.thisdcos.directory" matched '
        'cert\'s "keystore-https.hello-world.l4lb.thisdcos.directory"'
    )
    assert tls_verification_msg in output
def test_java_keystore():
    """Java `keystore-app` presents itself with the TLS certificate provided
    via keystore."""
    # curl from the artifacts container; mesos curl must be able to verify the
    # certificate served by the app.
    curl = " ".join([
        "curl", "-v", "-i",
        "--cacert", "secure-tls-pod.ca",
        "https://{}/hello-world".format(
            sdk_hosts.vip_host(config.SERVICE_NAME, KEYSTORE_TASK_HTTPS_PORT_NAME)),
    ])
    _, _, stderr = sdk_cmd.service_task_exec(config.SERVICE_NAME, "artifacts-0-node", curl)

    # HTTP 200, pre-configured CA used, task matched by SAN in certificate.
    assert "HTTP/1.1 200 OK" in stderr
    assert "CAfile: secure-tls-pod.ca" in stderr
    tls_verification_msg = (
        'host "keystore-https.hello-world.l4lb.thisdcos.directory" matched '
        'cert\'s "keystore-https.hello-world.l4lb.thisdcos.directory"')
    assert tls_verification_msg in stderr
def test_java_keystore():
    """Java `keystore-app` presents itself with the TLS certificate provided
    via keystore."""
    # curl from the artifacts container; mesos curl must be able to verify the
    # certificate served by the app.
    target_url = "https://{}/hello-world".format(
        sdk_hosts.vip_host(config.SERVICE_NAME, KEYSTORE_TASK_HTTPS_PORT_NAME)
    )
    curl = " ".join(["curl", "-v", "-i", "--cacert", "secure-tls-pod.ca", target_url])
    _, _, stderr = sdk_cmd.service_task_exec(config.SERVICE_NAME, "artifacts-0-node", curl)

    # HTTP 200, pre-configured CA used, task matched by SAN in certificate.
    assert "HTTP/1.1 200 OK" in stderr
    assert "CAfile: secure-tls-pod.ca" in stderr
    tls_verification_msg = (
        'host "keystore-https.hello-world.l4lb.thisdcos.directory" matched '
        'cert\'s "keystore-https.hello-world.l4lb.thisdcos.directory"'
    )
    assert tls_verification_msg in stderr
def test_endpoints():
    """Check that we can reach the scheduler via admin router and that the
    returned 'node' endpoints are sanitized."""
    foldered = config.get_foldered_service_name()
    endpoints = json.loads(
        cmd.run_cli('cassandra --name={} endpoints node'.format(foldered)))
    assert endpoints['dns'][0] == sdk_hosts.autoip_host(foldered, 'node-0-server', 9042)
    assert endpoints['vip'] == sdk_hosts.vip_host(foldered, 'node', 9042)
def test_endpoints():
    """Check that we can reach the scheduler via admin router and that the
    returned 'node' endpoints are sanitized."""
    listing = json.loads(
        cmd.run_cli('cassandra --name={} endpoints node'.format(FOLDERED_SERVICE_NAME)))
    assert listing['dns'][0] == hosts.autoip_host(FOLDERED_SERVICE_NAME, 'node-0-server', 9042)
    assert listing['vips'][0] == hosts.vip_host(FOLDERED_SERVICE_NAME, 'node', 9042)
def test_endpoints():
    """Check that we can reach the scheduler via admin router and that the
    returned endpoints are sanitized."""
    for endpoint in config.ENDPOINT_TYPES:
        listing = cmd.svc_cli(
            config.PACKAGE_NAME, FOLDERED_SERVICE_NAME,
            'endpoints {}'.format(endpoint), json=True)
        node_type = endpoint.split('-')[0]  # 'coordinator-http' => 'coordinator'
        assert listing['dns'][0].startswith(
            sdk_hosts.autoip_host(FOLDERED_SERVICE_NAME, node_type + '-0-node'))
        assert listing['vip'].startswith(
            sdk_hosts.vip_host(FOLDERED_SERVICE_NAME, node_type))
def test_endpoints():
    """Check that we can reach the scheduler via admin router and that the
    returned endpoints are sanitized, for each elastic node type."""
    for nodetype in ('coordinator', 'data', 'ingest', 'master'):
        listing = json.loads(
            cmd.run_cli('elastic --name={} endpoints {}'.format(
                FOLDERED_SERVICE_NAME, nodetype)))
        assert listing['dns'][0].startswith(
            hosts.autoip_host(FOLDERED_SERVICE_NAME, nodetype + '-0-node'))
        assert listing['vips'][0].startswith(
            hosts.vip_host(FOLDERED_SERVICE_NAME, nodetype))
def test_endpoints():
    """Check that we can reach the scheduler via admin router and that the
    returned endpoints are sanitized, then wait for deploy/recovery plans."""
    # NOTE(review): `foldered_name` is presumably defined at module scope —
    # confirm against the full file.
    for endpoint in config.ENDPOINT_TYPES:
        listing = sdk_cmd.svc_cli(
            config.PACKAGE_NAME, foldered_name, 'endpoints {}'.format(endpoint), json=True)
        node_type = endpoint.split('-')[0]  # 'coordinator-http' => 'coordinator'
        assert listing['dns'][0].startswith(
            sdk_hosts.autoip_host(foldered_name, node_type + '-0-node'))
        assert listing['vip'].startswith(sdk_hosts.vip_host(foldered_name, node_type))

    sdk_plan.wait_for_completed_deployment(foldered_name)
    sdk_plan.wait_for_completed_recovery(foldered_name)
def test_endpoints():
    """Check that we can reach the scheduler via admin router and that the
    returned endpoints are sanitized."""
    for endpoint in config.ENDPOINT_TYPES:
        listing = json.loads(
            cmd.run_cli('elastic --name={} endpoints {}'.format(
                FOLDERED_SERVICE_NAME, endpoint)))
        node_type = endpoint.split('-')[0]  # 'coordinator-http' => 'coordinator'
        assert listing['dns'][0].startswith(
            sdk_hosts.autoip_host(FOLDERED_SERVICE_NAME, node_type + '-0-node'))
        assert listing['vip'].startswith(
            sdk_hosts.vip_host(FOLDERED_SERVICE_NAME, node_type))
def test_endpoints() -> None:
    """Check that we can reach the scheduler via admin router and that the
    returned endpoints are sanitized, then wait for deploy/recovery plans."""
    # NOTE(review): `package_name` / `service_name` are presumably defined at
    # module scope — confirm against the full file.
    for endpoint in config.ENDPOINT_TYPES:
        listing = sdk_networks.get_endpoint(package_name, service_name, endpoint)
        node_type = endpoint.split("-")[0]  # 'coordinator-http' => 'coordinator'
        assert listing["dns"][0].startswith(
            sdk_hosts.autoip_host(service_name, node_type + "-0-node"))
        assert listing["vip"].startswith(sdk_hosts.vip_host(service_name, node_type))

    sdk_plan.wait_for_completed_deployment(service_name)
    sdk_plan.wait_for_completed_recovery(service_name)
def test_unique_vips():
    """Two spark dispatchers in different groups must get distinct, reachable
    UI VIPs."""
    @retrying.retry(wait_exponential_multiplier=1000, stop_max_attempt_number=7)  # ~2 minutes
    def verify_ip_is_reachable(ip):
        ok, _ = sdk_cmd.master_ssh("curl -v {}".format(ip))
        assert ok

    group_a = "test/groupa/spark"
    group_b = "test/groupb/spark"
    try:
        utils.require_spark(group_a)
        utils.require_spark(group_b)

        ui_a = sdk_hosts.vip_host("marathon", "dispatcher.{}".format(group_a), 4040)
        ui_b = sdk_hosts.vip_host("marathon", "dispatcher.{}".format(group_b), 4040)
        verify_ip_is_reachable(ui_a)
        verify_ip_is_reachable(ui_b)
    finally:
        utils.teardown_spark(service_name=group_a)
        utils.teardown_spark(service_name=group_b)
def test_unique_vips():
    """Two spark dispatchers in different groups must get distinct, reachable
    UI VIPs."""
    group_a = "test/groupa/spark"
    group_b = "test/groupb/spark"
    try:
        utils.require_spark(group_a)
        utils.require_spark(group_b)

        ui_a = sdk_hosts.vip_host("marathon", "dispatcher.{}".format(group_a), 4040)
        ui_b = sdk_hosts.vip_host("marathon", "dispatcher.{}".format(group_b), 4040)

        # verify dispatcher-ui is reachable at VIP
        for ui in (ui_a, ui_b):
            ok, _ = sdk_cmd.master_ssh("curl {}".format(ui))
            assert ok
    finally:
        sdk_install.uninstall(utils.SPARK_PACKAGE_NAME, group_a)
        sdk_install.uninstall(utils.SPARK_PACKAGE_NAME, group_b)
def test_endpoints():
    """Check that we can reach the scheduler via admin router and that the
    returned endpoints are sanitized, then wait for deploy/recovery plans."""
    # NOTE(review): `foldered_name` is presumably defined at module scope —
    # confirm against the full file.
    for endpoint in config.ENDPOINT_TYPES:
        listing = sdk_networks.get_endpoint(config.PACKAGE_NAME, foldered_name, endpoint)
        node_type = endpoint.split("-")[0]  # 'coordinator-http' => 'coordinator'
        assert listing["dns"][0].startswith(
            sdk_hosts.autoip_host(foldered_name, node_type + "-0-node"))
        assert listing["vip"].startswith(sdk_hosts.vip_host(foldered_name, node_type))

    sdk_plan.wait_for_completed_deployment(foldered_name)
    sdk_plan.wait_for_completed_recovery(foldered_name)
def test_endpoints_address():
    """Validate broker endpoint addresses, DNS names, and the VIP."""
    endpoints = sdk_networks.get_endpoint(config.PACKAGE_NAME, FOLDERED_NAME, "broker")
    # Fix: the original asserted `len(endpoints) == _something_` where
    # `_something_` was an undefined placeholder (NameError at runtime).
    # Assert the keys we actually rely on instead.
    assert {"address", "dns", "vip"} <= set(endpoints)
    assert len(endpoints["address"]) == config.DEFAULT_BROKER_COUNT
    assert len(endpoints["dns"]) == config.DEFAULT_BROKER_COUNT
    for i in range(len(endpoints["dns"])):
        assert (sdk_hosts.autoip_host(FOLDERED_NAME, "kafka-{}-broker".format(i))
                in endpoints["dns"][i])
    assert endpoints["vip"] == sdk_hosts.vip_host(FOLDERED_NAME, "broker", 9092)
def test_admin_router_with_folder_name() -> None:
    """Install elastic and kibana under foldered service names and verify that
    kibana is reachable through admin router at the quoted service path."""
    elastic_service_name = "test/integration/elastic"
    kibana_service_name = "test/integration/kibana"
    # Fix: if anything failed before `service_account_info` was assigned, the
    # `finally` block raised NameError and masked the original exception.
    service_account_info = None
    try:
        sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, kibana_service_name)
        sdk_install.uninstall(config.PACKAGE_NAME, elastic_service_name)
        service_account_info = transport_encryption.setup_service_account(elastic_service_name)
        sdk_install.install(
            config.PACKAGE_NAME,
            service_name=elastic_service_name,
            expected_running_tasks=config.DEFAULT_TASK_COUNT,
            additional_options={
                "service": {
                    "name": elastic_service_name,
                    "service_account": service_account_info["name"],
                    "service_account_secret": service_account_info["secret"],
                }
            },
            timeout_seconds=30 * 60,
            wait_for_deployment=True,
        )
        elasticsearch_url = "http://" + sdk_hosts.vip_host(
            elastic_service_name, "coordinator", 9200
        )
        sdk_install.install(
            config.KIBANA_PACKAGE_NAME,
            kibana_service_name,
            0,
            {"kibana": {"elasticsearch_url": elasticsearch_url}},
            wait_for_deployment=False,
            insert_strict_options=False,
        )
        # Quote unsafe characters in the folder path for URL use (note: '/' is
        # in quote()'s default `safe` set and is left intact).
        encoded_kibana_service_name = urllib.parse.quote(kibana_service_name)
        config.check_kibana_adminrouter_integration(
            "service/{}/".format(encoded_kibana_service_name)
        )
        config.check_kibana_adminrouter_integration(
            "service/{}/app/kibana".format(encoded_kibana_service_name)
        )
    finally:
        sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, kibana_service_name)
        sdk_install.uninstall(config.PACKAGE_NAME, elastic_service_name)
        if service_account_info is not None:
            transport_encryption.cleanup_service_account(elastic_service_name, service_account_info)
def test_endpoints_address():
    """Wait for the broker endpoint listing to settle, then validate its
    address/dns entries and the VIP."""
    def fun():
        ret = sdk_cmd.svc_cli(
            config.PACKAGE_NAME,
            FOLDERED_SERVICE_NAME,
            'endpoints {}'.format(config.DEFAULT_TASK_NAME),
            json=True)
        if len(ret['address']) == config.DEFAULT_BROKER_COUNT:
            return ret
        return False

    endpoints = shakedown.wait_for(fun)
    # Fix: the original asserted `len(endpoints) == _something_` where
    # `_something_` was an undefined placeholder (NameError at runtime).
    # Assert the keys we actually rely on instead.
    assert {'address', 'dns', 'vip'} <= set(endpoints)
    assert len(endpoints['address']) == config.DEFAULT_BROKER_COUNT
    assert len(endpoints['dns']) == config.DEFAULT_BROKER_COUNT
    for i in range(len(endpoints['dns'])):
        assert sdk_hosts.autoip_host(
            FOLDERED_SERVICE_NAME, 'kafka-{}-broker'.format(i)) in endpoints['dns'][i]
    assert endpoints['vip'] == sdk_hosts.vip_host(
        FOLDERED_SERVICE_NAME, 'broker', 9092)
def test_endpoints_address():
    """Wait for the broker endpoint listing to settle, then validate its
    address/dns entries and the VIPs."""
    def fun():
        ret = service_cli('endpoints {}'.format(DEFAULT_TASK_NAME),
                          service_name=FOLDERED_SERVICE_NAME)
        if len(ret['address']) == DEFAULT_BROKER_COUNT:
            return ret
        return False

    endpoints = shakedown.wait_for(fun)
    # Fix: the original asserted `len(endpoints) == _something_` where
    # `_something_` was an undefined placeholder (NameError at runtime).
    # Assert the keys we actually rely on instead.
    assert {'address', 'dns', 'vips'} <= set(endpoints)
    assert len(endpoints['address']) == DEFAULT_BROKER_COUNT
    assert len(endpoints['dns']) == DEFAULT_BROKER_COUNT
    for i in range(len(endpoints['dns'])):
        assert hosts.autoip_host(
            FOLDERED_SERVICE_NAME, 'kafka-{}-broker'.format(i)) in endpoints['dns'][i]
    assert endpoints['vips'][0] == hosts.vip_host(FOLDERED_SERVICE_NAME, 'broker', 9092)
def test_java_truststore():
    """Make an HTTP request from CLI to nginx exposed service; the CLI must
    read and use its truststore to verify the HTTPS connection."""
    # Run keystore-app's `truststoretest` against the service's own VIP: this
    # exercises both the end-entity cert served from the keystore and the CLI
    # client's custom truststore configuration.
    url = 'https://' + sdk_hosts.vip_host(config.SERVICE_NAME, NGINX_TASK_HTTPS_PORT_NAME)
    command = _java_command(
        'java -jar ' + KEYSTORE_APP_JAR_NAME + ' truststoretest integration-test.yml ' + url)
    _, output, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, 'keystore-0-webserver', command)

    # `dcos task exec` doesn't respect the executed command's return code, so
    # manually assert on the expected output.
    assert 'status=200' in output
def test_tls_nginx():
    """NGINX must expose a TLS service with the correct PEM-encoded end-entity
    certificate."""
    # Use keystore-app's `truststoretest` against the NGINX container to verify
    # it presents an end-entity cert that the truststore can verify.
    url = 'https://' + sdk_hosts.vip_host(config.SERVICE_NAME, NGINX_TASK_HTTPS_PORT_NAME) + '/'
    command = _java_command(
        'java -jar ' + KEYSTORE_APP_JAR_NAME + ' truststoretest integration-test.yml ' + url)
    _, output, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, 'keystore-0-webserver', command)

    # `dcos task exec` doesn't respect the executed command's return code, so
    # manually assert on the expected output.
    assert 'status=200' in output
def kibana_application(elastic_service: Dict[str, Any]) -> Iterator[Dict[str, Any]]:
    """Fixture: kibana on a virtual network with TLS and X-Pack security,
    backed by the given elastic service."""
    es_name = elastic_service["service"]["name"]
    options = {
        "service": {
            "name": config.KIBANA_SERVICE_NAME,
            "virtual_network_enabled": True,
            "virtual_network_plugin_labels": _labels_to_config(
                kibana_virtual_network_plugin_labels
            ),
        },
        "kibana": {
            "elasticsearch_tls": True,
            "elasticsearch_url": "https://" + sdk_hosts.vip_host(es_name, "coordinator", 9200),
            "elasticsearch_xpack_security_enabled": True,
            "password": elastic_service["passwords"]["kibana"],
        },
    }
    yield from tls._kibana_application_impl(elastic_service, options)
def test_java_truststore():
    """Make an HTTP request from CLI to nginx exposed service; the CLI must
    read and use its truststore to verify the HTTPS connection."""
    # Run keystore-app's `truststoretest` against the service's own VIP: this
    # exercises both the end-entity cert served from the keystore and the CLI
    # client's custom truststore configuration.
    url = "https://" + sdk_hosts.vip_host(config.SERVICE_NAME, NGINX_TASK_HTTPS_PORT_NAME)
    command = _java_command(
        "java -jar " + KEYSTORE_APP_JAR_NAME + " truststoretest integration-test.yml " + url)
    _, output, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, "keystore-0-webserver", command)

    # `dcos task exec` doesn't respect the executed command's return code, so
    # manually assert on the expected output.
    assert "status=200" in output
def test_tls_nginx():
    """NGINX must expose a TLS service with the correct PEM-encoded end-entity
    certificate."""
    # Use keystore-app's `truststoretest` against the NGINX container to verify
    # it presents an end-entity cert that the truststore can verify.
    url = "https://" + sdk_hosts.vip_host(config.SERVICE_NAME, NGINX_TASK_HTTPS_PORT_NAME) + "/"
    command = _java_command(
        "java -jar " + KEYSTORE_APP_JAR_NAME + " truststoretest integration-test.yml " + url)
    _, output, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, "keystore-0-webserver", command)

    # `dcos task exec` doesn't respect the executed command's return code, so
    # manually assert on the expected output.
    assert "status=200" in output
def kibana_application(
        elastic_service: Dict[str, Any]) -> Iterator[Dict[str, Any]]:
    """Fixture: install kibana (TLS + X-Pack security) against the given
    elastic service, yield its install metadata, then uninstall."""
    package_name = config.KIBANA_PACKAGE_NAME
    service_name = config.KIBANA_SERVICE_NAME
    service_options = {
        "service": {"name": service_name},
        "kibana": {
            "elasticsearch_tls": True,
            "elasticsearch_url": "https://" + sdk_hosts.vip_host(
                elastic_service["service"]["name"], "coordinator", 9200),
            "elasticsearch_xpack_security_enabled": True,
            "password": elastic_service["passwords"]["kibana"],
        },
    }
    try:
        sdk_install.uninstall(package_name, service_name)
        sdk_install.install(
            package_name,
            service_name=service_name,
            expected_running_tasks=0,
            additional_options=service_options,
            timeout_seconds=config.KIBANA_DEFAULT_TIMEOUT,
            wait_for_deployment=False,
        )
        # Hand tests the options used plus the package/elastic context.
        yield dict(service_options, package_name=package_name, elastic=elastic_service)
    finally:
        sdk_install.uninstall(package_name, service_name)
def test_endpoints_address():
    """Wait for the broker endpoint listing to settle, then validate its
    address/dns entries and the VIP."""
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)

    def fun():
        ret = sdk_cmd.svc_cli(
            config.PACKAGE_NAME,
            foldered_name,
            'endpoints {}'.format(config.DEFAULT_TASK_NAME),
            json=True)
        if len(ret['address']) == config.DEFAULT_BROKER_COUNT:
            return ret
        return False

    endpoints = shakedown.wait_for(fun)
    # Fix: the original asserted `len(endpoints) == _something_` where
    # `_something_` was an undefined placeholder (NameError at runtime).
    # Assert the keys we actually rely on instead.
    assert {'address', 'dns', 'vip'} <= set(endpoints)
    assert len(endpoints['address']) == config.DEFAULT_BROKER_COUNT
    assert len(endpoints['dns']) == config.DEFAULT_BROKER_COUNT
    for i in range(len(endpoints['dns'])):
        assert sdk_hosts.autoip_host(
            foldered_name, 'kafka-{}-broker'.format(i)) in endpoints['dns'][i]
    assert endpoints['vip'] == sdk_hosts.vip_host(foldered_name, 'broker', 9092)
def test_endpoints_address():
    """Verify broker endpoint addresses, DNS entries, and VIP via a retrying poll."""
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)

    @retrying.retry(
        wait_fixed=1000,
        stop_max_delay=120*1000,
        retry_on_result=lambda res: not res)
    def wait():
        # Retry until the endpoint listing contains one address per broker.
        ret = sdk_cmd.svc_cli(
            config.PACKAGE_NAME,
            foldered_name,
            'endpoints {}'.format(config.DEFAULT_TASK_NAME),
            json=True)
        if len(ret['address']) == config.DEFAULT_BROKER_COUNT:
            return ret
        return False

    endpoints = wait()
    # BUG FIX: the original `assert len(endpoints) == _something_` referenced the
    # undefined name `_something_` (NameError at runtime). Per the NOTE about not
    # asserting closed-to-extension, check for the expected keys without pinning
    # the exact key count, so new endpoint fields don't break this test.
    assert {'address', 'dns', 'vip'} <= set(endpoints)
    assert len(endpoints['address']) == config.DEFAULT_BROKER_COUNT
    assert len(endpoints['dns']) == config.DEFAULT_BROKER_COUNT
    for i in range(len(endpoints['dns'])):
        # Each DNS entry must resolve through the broker's autoip hostname.
        assert sdk_hosts.autoip_host(foldered_name, 'kafka-{}-broker'.format(i)) in endpoints['dns'][i]
    assert endpoints['vip'] == sdk_hosts.vip_host(foldered_name, 'broker', 9092)
def kibana_application_tls(elastic_service_tls):
    """Fixture-style generator: install Kibana configured for TLS against the
    Elasticsearch coordinator, yield for the test, then always uninstall.
    """
    try:
        coordinator_url = "https://" + sdk_hosts.vip_host(config.SERVICE_NAME, "coordinator", 9200)
        kibana_options = {
            "kibana": {
                "xpack_enabled": True,
                "elasticsearch_tls": True,
                "elasticsearch_url": coordinator_url,
            }
        }
        # Start from a clean slate in case a previous run left Kibana behind.
        sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_SERVICE_NAME)
        sdk_install.install(
            config.KIBANA_PACKAGE_NAME,
            service_name=config.KIBANA_SERVICE_NAME,
            expected_running_tasks=0,
            additional_options=kibana_options,
            timeout_seconds=config.KIBANA_DEFAULT_TIMEOUT,
            wait_for_deployment=False)
        yield
    finally:
        sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_SERVICE_NAME)
def test_java_truststore(hello_world_service):
    """
    Make an HTTP request from CLI to nginx exposed service.
    Test that CLI reads and uses truststore to verify HTTPS connection.
    """
    keystore_task = sdk_tasks.get_task_ids(config.SERVICE_NAME, "keystore")[0]
    assert keystore_task

    # Hit the service's own HTTPS VIP from a CLI app that uses the configured
    # keystore. This exercises both the end-entity certificate served from the
    # keystore and the CLI client's custom truststore verification.
    nginx_url = 'https://' + sdk_hosts.vip_host(config.SERVICE_NAME, NGINX_TASK_HTTPS_PORT_NAME)
    command = _java_command(
        'java -jar ' + KEYSTORE_APP_JAR_NAME + ' truststoretest '
        'integration-test.yml ' + nginx_url)
    output = task_exec(keystore_task, command)

    # `dcos task exec` doesn't propagate the container command's return code,
    # so assert on the expected output instead.
    assert 'status=200' in output
def test_tls_nginx(hello_world_service):
    """
    Checks that NGINX exposes TLS service with correct PEM encoded end-entity
    certificate.
    """
    keystore_task = sdk_tasks.get_task_ids(config.SERVICE_NAME, 'keystore')[0]
    assert keystore_task

    # keystore-app's `truststoretest` CLI command requests the NGINX container
    # and verifies that nginx presents an end-entity certificate which the
    # truststore can validate.
    target = 'https://' + sdk_hosts.vip_host(config.SERVICE_NAME, NGINX_TASK_HTTPS_PORT_NAME) + '/'
    command = _java_command(
        'java -jar ' + KEYSTORE_APP_JAR_NAME + ' truststoretest '
        'integration-test.yml ' + target)
    output = task_exec(keystore_task, command)

    # `dcos task exec` doesn't propagate the container command's return code,
    # so assert on the expected output instead.
    assert 'status=200' in output
def test_xpack_toggle_with_kibana(default_populated_index):
    """Toggle X-Pack in Elasticsearch end-to-end: verify Kibana works with X-Pack
    disabled and enabled, and that documents written while enabled survive
    disabling it again.

    Relies on module-level names (`foldered_name`, `current_expected_task_count`)
    defined elsewhere in this file.
    """
    log.info("\n***** Verify X-Pack disabled by default in elasticsearch")
    config.verify_commercial_api_status(False, service_name=foldered_name)

    log.info("\n***** Test kibana with X-Pack disabled...")
    elasticsearch_url = "http://" + sdk_hosts.vip_host(foldered_name, "coordinator", 9200)
    sdk_install.install(
        config.KIBANA_PACKAGE_NAME,
        config.KIBANA_PACKAGE_NAME,
        0,
        {"kibana": {"elasticsearch_url": elasticsearch_url}},
        timeout_seconds=config.KIBANA_DEFAULT_TIMEOUT,
        wait_for_deployment=False,
        insert_strict_options=False)
    config.check_kibana_adminrouter_integration(
        "service/{}/".format(config.KIBANA_PACKAGE_NAME))
    log.info("Uninstall kibana with X-Pack disabled")
    sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)

    log.info("\n***** Set/verify X-Pack enabled in elasticsearch. "
             "Requires parallel upgrade strategy for full restart.")
    config.set_xpack(True, service_name=foldered_name)
    config.check_elasticsearch_plugin_installed(config.XPACK_PLUGIN_NAME, service_name=foldered_name)
    config.verify_commercial_api_status(True, service_name=foldered_name)
    config.verify_xpack_license(service_name=foldered_name)

    log.info("\n***** Write some data while enabled, disable X-Pack, and verify we can still read what we wrote.")
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        2,
        {"name": "X-Pack", "role": "commercial plugin"},
        service_name=foldered_name)

    log.info("\n***** Test kibana with X-Pack enabled...")
    # BUG FIX: the original log message contained a raw (unescaped) newline inside
    # the string literal, which is a SyntaxError in Python. Rejoined into a single
    # properly-delimited message.
    log.info("\n***** Installing Kibana w/X-Pack can exceed default 15 minutes for Marathon "
             "deployment to complete due to a configured HTTP health check. (typical: 12 minutes)")
    sdk_install.install(
        config.KIBANA_PACKAGE_NAME,
        config.KIBANA_PACKAGE_NAME,
        0,
        {"kibana": {"elasticsearch_url": elasticsearch_url, "xpack_enabled": True}},
        timeout_seconds=config.KIBANA_DEFAULT_TIMEOUT,
        wait_for_deployment=False,
        insert_strict_options=False)
    config.check_kibana_plugin_installed(config.XPACK_PLUGIN_NAME, service_name=config.KIBANA_PACKAGE_NAME)
    config.check_kibana_adminrouter_integration("service/{}/login".format(config.KIBANA_PACKAGE_NAME))
    log.info("\n***** Uninstall kibana with X-Pack enabled")
    sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)

    log.info("\n***** Disable X-Pack in elasticsearch.")
    config.set_xpack(False, service_name=foldered_name)
    log.info("\n***** Verify we can still read what we wrote when X-Pack was enabled.")
    config.verify_commercial_api_status(False, service_name=foldered_name)
    doc = config.get_document(config.DEFAULT_INDEX_NAME, config.DEFAULT_INDEX_TYPE, 2, service_name=foldered_name)
    assert doc["_source"]["name"] == "X-Pack"

    # Reset upgrade strategy to serial and wait for the cluster to settle.
    config.update_app(foldered_name, {'UPDATE_STRATEGY': 'serial'}, current_expected_task_count)
    sdk_plan.wait_for_completed_deployment(foldered_name)
    sdk_plan.wait_for_completed_recovery(foldered_name)
def test_xpack_toggle_with_kibana(default_populated_index):
    """Toggle X-Pack on/off in Elasticsearch, verifying Kibana integration in both
    modes and that documents written while enabled remain readable after disabling.
    """
    # Verify disabled by default
    verify_commercial_api_status(False, service_name=FOLDERED_SERVICE_NAME)

    # Test kibana with x-pack disabled...
    install.uninstall("kibana")
    shakedown.install_package(
        "kibana",
        options_json={
            "kibana": {
                "elasticsearch_url": "http://" + hosts.vip_host(FOLDERED_SERVICE_NAME, "coordinator", 9200)
            }
        })
    shakedown.deployment_wait(app_id="/kibana", timeout=KIBANA_WAIT_TIME_IN_SECONDS)
    check_kibana_adminrouter_integration("service/kibana/")
    install.uninstall("kibana")

    # Set/verify enabled.
    # BUG FIX: enable_xpack() was previously invoked in the "disabled by default"
    # section above, so the kibana-with-X-Pack-disabled test ran against an
    # Elasticsearch that already had X-Pack enabled, and nothing toggled X-Pack
    # immediately before the verify_commercial_api_status(True, ...) check.
    # Moved the call here, matching the canonical ordering of this test elsewhere.
    enable_xpack(service_name=FOLDERED_SERVICE_NAME)
    verify_commercial_api_status(True, service_name=FOLDERED_SERVICE_NAME)
    verify_xpack_license(service_name=FOLDERED_SERVICE_NAME)

    # Write some data while enabled, disable X-Pack, and verify we can still read what we wrote.
    create_document(DEFAULT_INDEX_NAME, DEFAULT_INDEX_TYPE, 2, {
        "name": "X-Pack",
        "role": "commercial plugin"
    }, service_name=FOLDERED_SERVICE_NAME)

    # Test kibana with x-pack enabled...
    shakedown.install_package(
        "kibana",
        options_json={
            "kibana": {
                "elasticsearch_url": "http://" + hosts.vip_host(FOLDERED_SERVICE_NAME, "coordinator", 9200),
                "xpack_enabled": True
            }
        })
    # Installing Kibana w/x-pack can take as much as 15 minutes for Marathon deployment to complete,
    # due to a configured HTTP health check. (typical: 10 minutes)
    shakedown.deployment_wait(app_id="/kibana", timeout=KIBANA_WAIT_TIME_IN_SECONDS)
    check_kibana_adminrouter_integration("service/kibana/login")
    install.uninstall("kibana")

    # Disable again
    disable_xpack(service_name=FOLDERED_SERVICE_NAME)
    verify_commercial_api_status(False, service_name=FOLDERED_SERVICE_NAME)
    doc = get_document(DEFAULT_INDEX_NAME, DEFAULT_INDEX_TYPE, 2, service_name=FOLDERED_SERVICE_NAME)
    assert doc["_source"]["name"] == "X-Pack"
def test_security_toggle_with_kibana(default_populated_index: None) -> None:
    """Toggle X-Pack security on and off, verifying Kibana integration in both
    modes and that documents written in either mode survive the transition.

    NOTE(review): relies on module-level names (`service_name`, `index_name`,
    `index_type`, `package_name`, `kibana_package_name`, `kibana_service_name`,
    `kibana_timeout`, `current_expected_task_count`) defined elsewhere in this
    file — confirm against the full module.
    """
    http_user = config.DEFAULT_ELASTICSEARCH_USER

    # Verify that commercial APIs are disabled by default in Elasticsearch.
    config.verify_commercial_api_status(False, service_name=service_name)

    # Write some data with security disabled, enabled security, and afterwards verify that we can
    # still read what we wrote.
    document_security_disabled_id = 1
    document_security_disabled_fields = {"name": "Elasticsearch", "role": "search engine"}
    config.create_document(
        index_name,
        index_type,
        document_security_disabled_id,
        document_security_disabled_fields,
        service_name=service_name,
    )

    # Verify that basic license is enabled by default.
    config.verify_xpack_license("basic", service_name=service_name)

    # Install Kibana (no credentials needed while security is disabled).
    elasticsearch_url = "http://" + sdk_hosts.vip_host(service_name, "coordinator", 9200)
    sdk_install.install(
        kibana_package_name,
        kibana_service_name,
        0,
        {"kibana": {"elasticsearch_url": elasticsearch_url}},
        timeout_seconds=kibana_timeout,
        wait_for_deployment=False,
        insert_strict_options=False,
    )

    # Verify that it works.
    config.check_kibana_adminrouter_integration("service/{}/app/kibana".format(kibana_package_name))

    # Uninstall it.
    sdk_install.uninstall(kibana_package_name, kibana_package_name)

    # Enable Elasticsearch security. Parallel update strategy restarts all nodes
    # at once rather than serially.
    sdk_service.update_configuration(
        package_name,
        service_name,
        {
            "elasticsearch": {"xpack_security_enabled": True},
            "service": {"update_strategy": "parallel"},
        },
        current_expected_task_count,
    )

    # This should still be disabled.
    config.verify_commercial_api_status(False, service_name=service_name)

    # Start trial license.
    config.start_trial_license(service_name=service_name)

    # Set up passwords. Basic HTTP credentials will have to be used in HTTP requests to
    # Elasticsearch from now on.
    passwords = config.setup_passwords(service_name)

    # Verify trial license is working.
    config.verify_xpack_license(
        "trial", service_name=service_name, http_user=http_user, http_password=passwords["elastic"]
    )
    config.verify_commercial_api_status(
        True, service_name=service_name, http_user=http_user, http_password=passwords["elastic"]
    )

    # Write some data with security enabled, disable security, and afterwards verify that we can
    # still read what we wrote.
    document_security_enabled_id = 2
    document_security_enabled_fields = {"name": "X-Pack", "role": "commercial plugin"}
    config.create_document(
        index_name,
        index_type,
        document_security_enabled_id,
        document_security_enabled_fields,
        service_name=service_name,
        http_user=http_user,
        http_password=passwords["elastic"],
    )

    # Install Kibana with security enabled.
    sdk_install.install(
        kibana_package_name,
        kibana_package_name,
        0,
        {
            "kibana": {
                "elasticsearch_url": elasticsearch_url,
                "elasticsearch_xpack_security_enabled": True,
                "user": config.DEFAULT_KIBANA_USER,
                "password": passwords["kibana"],
            }
        },
        timeout_seconds=kibana_timeout,
        wait_for_deployment=False,
        insert_strict_options=False,
    )

    # Verify that it works. Notice that with security enabled, one has to access
    # /service/kibana/login instead of /service/kibana.
    config.check_kibana_adminrouter_integration("service/{}/login".format(kibana_package_name))

    # Uninstall it.
    sdk_install.uninstall(kibana_package_name, kibana_package_name)

    # Disable Elastic security.
    sdk_service.update_configuration(
        package_name,
        service_name,
        {
            "elasticsearch": {"xpack_security_enabled": False},
            "service": {"update_strategy": "parallel"},
        },
        current_expected_task_count,
    )

    # Verify we can read what was written before toggling security, without basic HTTP credentials.
    document_security_disabled = config.get_document(
        index_name, index_type, document_security_disabled_id, service_name=service_name
    )
    assert (
        document_security_disabled["_source"]["name"] == document_security_disabled_fields["name"]
    )

    # Verify we can read what was written when security was enabled, without basic HTTP credentials.
    document_security_enabled = config.get_document(
        index_name, index_type, document_security_enabled_id, service_name=service_name
    )
    assert document_security_enabled["_source"]["name"] == document_security_enabled_fields["name"]

    # Set update_strategy back to serial.
    sdk_service.update_configuration(
        package_name,
        service_name,
        {"service": {"update_strategy": "serial"}},
        current_expected_task_count,
    )
def test_security_toggle_with_kibana(default_populated_index):
    """Toggle X-Pack security on and off for the foldered service, verifying
    Kibana integration in both modes and that documents written in either mode
    survive the transition.

    NOTE(review): relies on module-level names (`foldered_name`,
    `current_expected_task_count`) defined elsewhere in this file — confirm
    against the full module.
    """
    # Verify that commercial APIs are disabled by default in Elasticsearch.
    config.verify_commercial_api_status(False, service_name=foldered_name)

    # Write some data with security disabled, enabled security, and afterwards verify that we can
    # still read what we wrote.
    document_security_disabled_id = 1
    document_security_disabled_fields = {"name": "Elasticsearch", "role": "search engine"}
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        document_security_disabled_id,
        document_security_disabled_fields,
        service_name=foldered_name,
    )

    # Verify that basic license is enabled by default.
    config.verify_xpack_license("basic", service_name=foldered_name)

    # Install Kibana (no credentials needed while security is disabled).
    elasticsearch_url = "http://" + sdk_hosts.vip_host(foldered_name, "coordinator", 9200)
    sdk_install.install(
        config.KIBANA_PACKAGE_NAME,
        config.KIBANA_PACKAGE_NAME,
        0,
        {"kibana": {"elasticsearch_url": elasticsearch_url}},
        timeout_seconds=config.KIBANA_DEFAULT_TIMEOUT,
        wait_for_deployment=False,
        insert_strict_options=False,
    )

    # Verify that it works.
    config.check_kibana_adminrouter_integration("service/{}/".format(config.KIBANA_PACKAGE_NAME))

    # Uninstall it.
    sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)

    # Enable Elasticsearch security. Parallel update strategy restarts all nodes
    # at once rather than serially.
    sdk_service.update_configuration(
        config.PACKAGE_NAME,
        foldered_name,
        {
            "elasticsearch": {"xpack_security_enabled": True},
            "service": {"update_strategy": "parallel"},
        },
        current_expected_task_count,
    )

    # This should still be disabled.
    config.verify_commercial_api_status(False, service_name=foldered_name)

    # Start trial license.
    config.start_trial_license(service_name=foldered_name)

    # Set up passwords. Basic HTTP credentials will have to be used in HTTP requests to
    # Elasticsearch from now on.
    passwords = config.setup_passwords(foldered_name)

    # Verify trial license is working.
    config.verify_xpack_license(
        "trial",
        service_name=foldered_name,
        http_user=config.DEFAULT_ELASTICSEARCH_USER,
        http_password=passwords["elastic"],
    )
    config.verify_commercial_api_status(
        True,
        service_name=foldered_name,
        http_user=config.DEFAULT_ELASTICSEARCH_USER,
        http_password=passwords["elastic"],
    )

    # Write some data with security enabled, disable security, and afterwards verify that we can
    # still read what we wrote.
    document_security_enabled_id = 2
    document_security_enabled_fields = {"name": "X-Pack", "role": "commercial plugin"}
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        document_security_enabled_id,
        document_security_enabled_fields,
        service_name=foldered_name,
        http_user=config.DEFAULT_ELASTICSEARCH_USER,
        http_password=passwords["elastic"],
    )

    # Install Kibana with security enabled.
    sdk_install.install(
        config.KIBANA_PACKAGE_NAME,
        config.KIBANA_PACKAGE_NAME,
        0,
        {
            "kibana": {
                "elasticsearch_url": elasticsearch_url,
                "elasticsearch_xpack_security_enabled": True,
                "user": config.DEFAULT_KIBANA_USER,
                "password": passwords["kibana"],
            }
        },
        timeout_seconds=config.KIBANA_DEFAULT_TIMEOUT,
        wait_for_deployment=False,
        insert_strict_options=False,
    )

    # Verify that it works. Notice that with security enabled, one has to access
    # /service/kibana/login instead of /service/kibana.
    config.check_kibana_adminrouter_integration(
        "service/{}/login".format(config.KIBANA_PACKAGE_NAME)
    )

    # Uninstall it.
    sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)

    # Disable Elastic security.
    sdk_service.update_configuration(
        config.PACKAGE_NAME,
        foldered_name,
        {
            "elasticsearch": {"xpack_security_enabled": False},
            "service": {"update_strategy": "parallel"},
        },
        current_expected_task_count,
    )

    # Verify we can read what was written before toggling security, without basic HTTP credentials.
    document_security_disabled = config.get_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        document_security_disabled_id,
        service_name=foldered_name,
    )
    assert (
        document_security_disabled["_source"]["name"] == document_security_disabled_fields["name"]
    )

    # Verify we can read what was written when security was enabled, without basic HTTP credentials.
    document_security_enabled = config.get_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        document_security_enabled_id,
        service_name=foldered_name,
    )
    assert document_security_enabled["_source"]["name"] == document_security_enabled_fields["name"]

    # Set update_strategy back to serial.
    sdk_service.update_configuration(
        config.PACKAGE_NAME,
        foldered_name,
        {"service": {"update_strategy": "serial"}},
        current_expected_task_count,
    )