def test_add_ingest_and_coordinator_nodes_does_not_restart_master_or_data_nodes() -> None:
    """Scale ingest and coordinator node counts up by one and verify that the
    update leaves master and data node tasks untouched (no restarts).

    Mutates the module-level `current_expected_task_count` to account for the
    two new tasks.
    """
    # Capture task IDs before the update so restarts can be detected afterwards.
    initial_master_task_ids = sdk_tasks.get_task_ids(foldered_name, "master")
    initial_data_task_ids = sdk_tasks.get_task_ids(foldered_name, "data")

    # Get service configuration.
    _, svc_config, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, foldered_name, "describe", parse_json=True
    )

    ingest_nodes_count = get_in(["ingest_nodes", "count"], svc_config)
    coordinator_nodes_count = get_in(["coordinator_nodes", "count"], svc_config)

    global current_expected_task_count

    sdk_service.update_configuration(
        config.PACKAGE_NAME,
        foldered_name,
        {
            "ingest_nodes": {"count": ingest_nodes_count + 1},
            "coordinator_nodes": {"count": coordinator_nodes_count + 1},
        },
        current_expected_task_count,
        # As of 2018-12-14, sdk_upgrade's `wait_for_deployment` has different behavior than
        # sdk_install's (which is what we wanted here), so don't use it. Check manually afterwards
        # with `sdk_tasks.check_running`.
        wait_for_deployment=False,
    )

    # Should be running 2 tasks more.
    current_expected_task_count += 2
    sdk_tasks.check_running(foldered_name, current_expected_task_count)
    # Master nodes should not restart.
    sdk_tasks.check_tasks_not_updated(foldered_name, "master", initial_master_task_ids)
    # Data nodes should not restart.
    sdk_tasks.check_tasks_not_updated(foldered_name, "data", initial_data_task_ids)
def test_add_ingest_and_coordinator_nodes_does_not_restart_master_or_data_nodes() -> None:
    """Scale ingest and coordinator node counts up by one and verify that the
    update leaves master and data node tasks untouched (no restarts).

    Mutates the module-level `current_expected_task_count` to account for the
    two new tasks.
    """
    # Capture task IDs before the update so restarts can be detected afterwards.
    initial_master_task_ids = sdk_tasks.get_task_ids(service_name, "master")
    initial_data_task_ids = sdk_tasks.get_task_ids(service_name, "data")

    # Get service configuration.
    _, svc_config, _ = sdk_cmd.svc_cli(package_name, service_name, "describe", parse_json=True)

    ingest_nodes_count = get_in(["ingest_nodes", "count"], svc_config)
    coordinator_nodes_count = get_in(["coordinator_nodes", "count"], svc_config)

    global current_expected_task_count

    sdk_service.update_configuration(
        package_name,
        service_name,
        {
            "ingest_nodes": {"count": ingest_nodes_count + 1},
            "coordinator_nodes": {"count": coordinator_nodes_count + 1},
        },
        current_expected_task_count,
        # As of 2018-12-14, sdk_upgrade's `wait_for_deployment` has different behavior than
        # sdk_install's (which is what we wanted here), so don't use it. Check manually afterwards
        # with `sdk_tasks.check_running`.
        wait_for_deployment=False,
    )

    # Should be running 2 tasks more.
    current_expected_task_count += 2
    sdk_tasks.check_running(service_name, current_expected_task_count)
    # Master nodes should not restart.
    sdk_tasks.check_tasks_not_updated(service_name, "master", initial_master_task_ids)
    # Data nodes should not restart.
    sdk_tasks.check_tasks_not_updated(service_name, "data", initial_data_task_ids)
def test_plugin_install_and_uninstall(default_populated_index: None) -> None:
    """Install the analysis-icu plugin via a config update and verify it, then
    clear the plugin setting and verify the plugin is removed again.
    """
    plugin = "analysis-icu"

    # Phase 1: set the plugin and expect it installed; phase 2: clear it and
    # expect it uninstalled.
    for plugins_value, verify in (
        (plugin, config.check_elasticsearch_plugin_installed),
        ("", config.check_elasticsearch_plugin_uninstalled),
    ):
        sdk_service.update_configuration(
            config.PACKAGE_NAME,
            foldered_name,
            {"elasticsearch": {"plugins": plugins_value}},
            current_expected_task_count,
        )
        verify(plugin, service_name=foldered_name)
def test_udf() -> None:
    """Enable Cassandra user-defined functions (plain and scripted) via a
    config update, then verify a client can write and read a UDF.

    Metronome jobs used by the verification are installed up front and always
    removed in the `finally` block, even if the test fails.
    """
    test_jobs: List[Dict[str, Any]] = []
    try:
        test_jobs = config.get_udf_jobs(node_address=config.get_foldered_node_address())
        # destroy/reinstall any prior leftover jobs, so that they don't touch the newly installed service:
        for job in test_jobs:
            sdk_jobs.install_job(job)

        # Both flags are needed: scripted UDFs are gated separately from plain UDFs.
        new_config = {
            "cassandra": {
                "enable_user_defined_functions": True,
                "enable_scripted_user_defined_functions": True,
            }
        }
        sdk_service.update_configuration(
            config.PACKAGE_NAME,
            config.get_foldered_service_name(),
            new_config,
            config.DEFAULT_TASK_COUNT,
        )
        config.verify_client_can_write_read_udf(config.get_foldered_node_address())
    finally:
        # remove job definitions from metronome
        for job in test_jobs:
            sdk_jobs.remove_job(job)
def test_custom_log4j2_properties_base64() -> None:
    """Push a base64-encoded log4j2.properties override and verify the decoded
    line ends up in the rendered config file on the master node.

    The override is always reset to the empty string in the `finally` block so
    later tests see the default logging configuration.
    """
    try:
        decoded_base_64_log4j2_properties = "rootLogger.level = debug"
        # The service option takes the properties file content base64-encoded.
        base_64_log4j2_properties = base64.b64encode(
            decoded_base_64_log4j2_properties.encode("utf-8")
        ).decode("utf-8")

        sdk_service.update_configuration(
            package_name,
            service_name,
            {"elasticsearch": {"custom_log4j2_properties": base_64_log4j2_properties}},
            current_expected_task_count,
        )

        # Grep the rendered file inside the task sandbox for the decoded line.
        cmd = "bash -c 'grep \"{}\" elasticsearch-*/config/log4j2.properties'".format(
            decoded_base_64_log4j2_properties
        )
        rc, stdout, stderr = sdk_cmd.service_task_exec(service_name, "master-0-node", cmd)
        assert rc == 0 and decoded_base_64_log4j2_properties in stdout
    finally:
        sdk_service.update_configuration(
            package_name,
            service_name,
            {"elasticsearch": {"custom_log4j2_properties": ""}},
            current_expected_task_count,
        )
def elastic_service(service_account: Dict[str, Any]) -> Iterator[Dict[str, Any]]:
    """Fixture: install Elastic with transport encryption and X-Pack security
    enabled, start a trial license, set up passwords, and yield the effective
    service options (plus package name and passwords) to the test.

    Always uninstalls the service in the `finally` block, even if setup or the
    test fails.
    """
    package_name = config.PACKAGE_NAME
    service_name = config.SERVICE_NAME
    expected_running_tasks = config.DEFAULT_TASK_COUNT
    service_options = {
        "service": {
            "name": service_name,
            "service_account": service_account["name"],
            "service_account_secret": service_account["secret"],
            "security": {"transport_encryption": {"enabled": True}},
        },
        "elasticsearch": {"xpack_security_enabled": True},
    }
    try:
        # Uninstall first to clear out any leftover deployment.
        sdk_install.uninstall(package_name, service_name)
        sdk_install.install(
            package_name,
            service_name=service_name,
            expected_running_tasks=expected_running_tasks,
            additional_options=service_options,
            timeout_seconds=30 * 60,
        )

        # Start trial license.
        config.start_trial_license(service_name, https=True)

        # Set up passwords. Basic HTTP credentials will have to be used in HTTP requests to
        # Elasticsearch from now on.
        passwords = config.setup_passwords(service_name, https=True)

        # Set up healthcheck basic HTTP credentials.
        sdk_service.update_configuration(
            package_name,
            service_name,
            {"elasticsearch": {"health_user_password": passwords["elastic"]}},
            expected_running_tasks,
        )

        yield {**service_options, **{"package_name": package_name, "passwords": passwords}}
    finally:
        sdk_install.uninstall(package_name, service_name)
def test_adding_data_node_only_restarts_masters() -> None:
    """Add one data node and verify that only master tasks restart; existing
    data tasks and coordinator tasks keep their task IDs.

    Mutates the module-level `current_expected_task_count` for the new task.
    """
    # Capture task IDs before the update so restarts can be detected afterwards.
    initial_master_task_ids = sdk_tasks.get_task_ids(foldered_name, "master")
    initial_data_task_ids = sdk_tasks.get_task_ids(foldered_name, "data")
    initial_coordinator_task_ids = sdk_tasks.get_task_ids(foldered_name, "coordinator")

    # Get service configuration.
    _, svc_config, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, foldered_name, "describe", parse_json=True
    )

    data_nodes_count = get_in(["data_nodes", "count"], svc_config)

    global current_expected_task_count

    # Increase the data nodes count by 1.
    sdk_service.update_configuration(
        config.PACKAGE_NAME,
        foldered_name,
        {"data_nodes": {"count": data_nodes_count + 1}},
        current_expected_task_count,
        # As of 2018-12-14, sdk_upgrade's `wait_for_deployment` has different behavior than
        # sdk_install's (which is what we wanted here), so don't use it. Check manually afterwards
        # with `sdk_tasks.check_running`.
        wait_for_deployment=False,
    )

    sdk_plan.wait_for_kicked_off_deployment(foldered_name)
    sdk_plan.wait_for_completed_deployment(foldered_name)

    # Pod indices are zero-based, so the new pod's index equals the old count.
    _, new_data_pod_info, _ = sdk_cmd.svc_cli(
        config.PACKAGE_NAME,
        foldered_name,
        "pod info data-{}".format(data_nodes_count),
        parse_json=True,
    )

    # Get task ID for new data node task.
    new_data_task_id = get_in([0, "info", "taskId", "value"], new_data_pod_info)

    # Should be running 1 task more.
    current_expected_task_count += 1
    sdk_tasks.check_running(foldered_name, current_expected_task_count)
    # Master nodes should restart.
    sdk_tasks.check_tasks_updated(foldered_name, "master", initial_master_task_ids)
    # Data node tasks should be the initial ones plus the new one.
    sdk_tasks.check_tasks_not_updated(
        foldered_name, "data", initial_data_task_ids + [new_data_task_id]
    )
    # Coordinator tasks should not restart.
    sdk_tasks.check_tasks_not_updated(foldered_name, "coordinator", initial_coordinator_task_ids)
def _elastic_service_impl(
    service_account: Dict[str, Any], configuration: Dict[str, Any] = {}
) -> Iterator[Dict[str, Any]]:
    """Fixture helper: install Elastic with the given options, set up passwords
    and the healthcheck user, verify the security index is green, and yield the
    effective configuration (plus package name and passwords).

    Always uninstalls the service in the `finally` block.

    NOTE(review): `configuration={}` is a mutable default argument; it is never
    mutated here so behavior is fine, but a `None` default would be safer.
    """
    package_name = config.PACKAGE_NAME
    service_name = config.SERVICE_NAME
    expected_running_tasks = config.DEFAULT_TASK_COUNT
    try:
        # Uninstall first to clear out any leftover deployment.
        sdk_install.uninstall(package_name, service_name)
        sdk_install.install(
            package_name,
            service_name=service_name,
            expected_running_tasks=expected_running_tasks,
            additional_options=configuration,
            timeout_seconds=30 * 60,
            wait_for_deployment=False,
        )

        # Set up passwords. Basic HTTP credentials will have to be used in HTTP requests to
        # Elasticsearch from now on.
        passwords = config.setup_passwords(service_name, https=True)
        http_password = passwords["elastic"]

        # Set up healthcheck basic HTTP credentials.
        sdk_service.update_configuration(
            package_name,
            service_name,
            {"elasticsearch": {"health_user_password": http_password}},
            expected_running_tasks,
        )

        # The internal security index must be healthy before tests proceed.
        config.check_elasticsearch_index_health(
            ".security-7",
            "green",
            service_name=service_name,
            http_password=http_password,
            https=True,
        )

        yield {**configuration, **{"package_name": package_name, "passwords": passwords}}
    finally:
        sdk_install.uninstall(package_name, service_name)
def test_custom_yaml_base64() -> None:
    """Apply a base64-encoded custom elasticsearch.yml block and verify both a
    scalar setting and an array-of-strings setting survive the YAML/Mustache
    rendering pipeline intact.
    """
    # Apply this custom YAML block as a base64-encoded string:
    # cluster:
    #   routing:
    #     allocation:
    #       node_initial_primaries_recoveries: 3
    # script.allowed_contexts: ["search", "update"]
    base64_elasticsearch_yml = "".join([
        "Y2x1c3RlcjoKICByb3V0aW5nOgogICAgYWxsb2NhdGlvbjoKICAgICAgbm9kZV9pbml0aWFsX3By",
        "aW1hcmllc19yZWNvdmVyaWVzOiAzCnNjcmlwdC5hbGxvd2VkX2NvbnRleHRzOiBbInNlYXJjaCIs",
        "ICJ1cGRhdGUiXQ==",
    ])

    sdk_service.update_configuration(
        config.PACKAGE_NAME,
        foldered_name,
        {"elasticsearch": {"custom_elasticsearch_yml": base64_elasticsearch_yml}},
        current_expected_task_count,
    )

    # We're testing two things here:

    # 1. The default value for `cluster.routing.allocation.node_initial_primaries_recoveries` is 4.
    # Here we want to make sure that the end-result for the multiple YAML/Mustache compilation steps
    # results in a valid elasticsearch.yml file, with the correct setting value.
    config.check_custom_elasticsearch_cluster_setting(
        foldered_name,
        ["cluster", "routing", "allocation", "node_initial_primaries_recoveries"],
        "3",
    )

    # 2. `script.allowed_contexts` has an "array of strings" value defined in the custom YAML. Here
    # we're also making sure that the end-result for the multiple YAML/Mustache compilation steps
    # results in a valid elasticsearch.yml file, but with a trickier compilation case due to the
    # setting value being an array of strings.
    config.check_custom_elasticsearch_cluster_setting(
        foldered_name,
        ["script", "allowed_contexts"],
        ["search", "update"],
    )
def test_pod_replace_then_immediate_config_update() -> None:
    """Kick off a `pod replace` and immediately push a config update, then
    verify the replaced pod comes up with the new configuration (i.e. the
    replace does not race the update).
    """
    sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, "pod replace data-0")

    plugins = "analysis-phonetic"

    # Parallel update strategy so the config change isn't serialized behind the replace.
    sdk_service.update_configuration(
        config.PACKAGE_NAME,
        foldered_name,
        {"service": {"update_strategy": "parallel"}, "elasticsearch": {"plugins": plugins}},
        current_expected_task_count,
    )

    # Ensure all nodes, especially data-0, get launched with the updated config.
    config.check_elasticsearch_plugin_installed(plugins, service_name=foldered_name)

    sdk_plan.wait_for_completed_deployment(foldered_name)
    sdk_plan.wait_for_completed_recovery(foldered_name)
def test_pod_replace_then_immediate_config_update() -> None:
    """Kick off a `pod replace` of data-0 and immediately push a config
    update; the replaced pod must come up with the updated configuration.
    """
    sdk_cmd.svc_cli(package_name, service_name, "pod replace data-0")

    plugin = "analysis-phonetic"
    options = {
        "service": {"update_strategy": "parallel"},
        "elasticsearch": {"plugins": plugin},
    }
    sdk_service.update_configuration(
        package_name, service_name, options, current_expected_task_count
    )

    # Ensure all nodes, especially data-0, get launched with the updated config.
    config.check_elasticsearch_plugin_installed(plugin, service_name=service_name)

    sdk_plan.wait_for_completed_deployment(service_name)
    sdk_plan.wait_for_completed_recovery(service_name)
def test_plugin_install_via_proxy() -> None:
    """Install an Elasticsearch plugin from a URL through an HTTP/HTTPS proxy
    running on the internal leader, verify the proxy was actually used, then
    uninstall the plugin.

    The proxy is always torn down in the `finally` block.
    """
    try:
        # Make sure no stale proxy is running before the service config update.
        _uninstall_and_kill_proxy_before_install()
        proxy_host = sdk_cmd._internal_leader_host()
        proxy_port = 8899
        _install_and_run_proxy(proxy_host, proxy_port)
        plugin_name = "analysis-ukrainian"
        plugins = "https://s3.amazonaws.com/downloads.mesosphere.io/infinity-artifacts/elastic/analysis-ukrainian-7.9.3.zip"
        _check_proxy_healthy(proxy_host, proxy_port, plugins)

        # Point both the HTTP and HTTPS plugin proxy settings at our proxy.
        sdk_service.update_configuration(
            package_name,
            service_name,
            {
                "elasticsearch": {
                    "plugins": plugins,
                    "plugin_http_proxy_host": proxy_host,
                    "plugin_http_proxy_port": proxy_port,
                    "plugin_https_proxy_host": proxy_host,
                    "plugin_https_proxy_port": proxy_port,
                }
            },
            config.DEFAULT_TASK_COUNT,
        )
        # Only node tasks run the plugin; exclude non-node tasks from the count.
        config.check_elasticsearch_plugin_installed(
            plugin_name,
            service_name=service_name,
            expected_nodes_count=current_expected_task_count - current_non_node_task_count,
        )
        _check_proxy_was_used()

        # Clear the plugin setting and verify the plugin is removed again.
        sdk_service.update_configuration(
            package_name,
            service_name,
            {"elasticsearch": {"plugins": ""}},
            current_expected_task_count,
        )
        config.check_elasticsearch_plugin_uninstalled(plugin_name, service_name=service_name)
    finally:
        _uninstall_and_kill_proxy()
def test_plugin_install_and_uninstall(default_populated_index: None) -> None:
    """Install the analysis-icu plugin via a config update and verify it, then
    clear the plugin setting and verify the plugin is removed again.
    """
    plugin = "analysis-icu"

    # Phase 1: set the plugin and expect it installed; phase 2: clear it and
    # expect it uninstalled.
    for plugins_value, verify in (
        (plugin, config.check_elasticsearch_plugin_installed),
        ("", config.check_elasticsearch_plugin_uninstalled),
    ):
        sdk_service.update_configuration(
            package_name,
            service_name,
            {"elasticsearch": {"plugins": plugins_value}},
            current_expected_task_count,
        )
        verify(plugin, service_name=service_name)
def test_adding_data_node_only_restarts_masters() -> None:
    """Add one data node and verify that only master tasks restart; existing
    data tasks and coordinator tasks keep their task IDs.

    Mutates the module-level `current_expected_task_count` for the new task.
    """
    # Capture task IDs before the update so restarts can be detected afterwards.
    initial_master_task_ids = sdk_tasks.get_task_ids(service_name, "master")
    initial_data_task_ids = sdk_tasks.get_task_ids(service_name, "data")
    initial_coordinator_task_ids = sdk_tasks.get_task_ids(service_name, "coordinator")

    # Get service configuration.
    _, svc_config, _ = sdk_cmd.svc_cli(package_name, service_name, "describe", parse_json=True)

    data_nodes_count = get_in(["data_nodes", "count"], svc_config)

    global current_expected_task_count

    # Increase the data nodes count by 1.
    sdk_service.update_configuration(
        package_name,
        service_name,
        {"data_nodes": {"count": data_nodes_count + 1}},
        current_expected_task_count,
        # As of 2018-12-14, sdk_upgrade's `wait_for_deployment` has different behavior than
        # sdk_install's (which is what we wanted here), so don't use it. Check manually afterwards
        # with `sdk_tasks.check_running`.
        wait_for_deployment=False,
    )

    sdk_plan.wait_for_kicked_off_deployment(service_name)
    sdk_plan.wait_for_completed_deployment(service_name)

    # Pod indices are zero-based, so the new pod's index equals the old count.
    _, new_data_pod_info, _ = sdk_cmd.svc_cli(
        package_name, service_name, "pod info data-{}".format(data_nodes_count), parse_json=True
    )

    # Get task ID for new data node task.
    new_data_task_id = get_in([0, "info", "taskId", "value"], new_data_pod_info)

    # Should be running 1 task more.
    current_expected_task_count += 1
    sdk_tasks.check_running(service_name, current_expected_task_count)
    # Master nodes should restart.
    sdk_tasks.check_tasks_updated(service_name, "master", initial_master_task_ids)
    # Data node tasks should be the initial ones plus the new one.
    sdk_tasks.check_tasks_not_updated(
        service_name, "data", initial_data_task_ids + [new_data_task_id]
    )
    # Coordinator tasks should not restart.
    sdk_tasks.check_tasks_not_updated(service_name, "coordinator", initial_coordinator_task_ids)
def test_custom_yaml_base64() -> None:
    """Apply a base64-encoded custom elasticsearch.yml block and verify the
    overridden setting survives the YAML/Mustache rendering pipeline.
    """
    # Apply this custom YAML block as a base64-encoded string:
    # cluster:
    #   routing:
    #     allocation:
    #       node_initial_primaries_recoveries: 3
    # The default value is 4. We're just testing to make sure the YAML formatting survived intact and the setting
    # got updated in the config.
    base64_elasticsearch_yml = "Y2x1c3RlcjoNCiAgcm91dGluZzoNCiAgICBhbGxvY2F0aW9uOg0KICAgICAgbm9kZV9pbml0aWFsX3ByaW1hcmllc19yZWNvdmVyaWVzOiAz"

    sdk_service.update_configuration(
        config.PACKAGE_NAME,
        foldered_name,
        {"elasticsearch": {"custom_elasticsearch_yml": base64_elasticsearch_yml}},
        current_expected_task_count,
    )

    config.check_custom_elasticsearch_cluster_setting(service_name=foldered_name)
def test_custom_jmx_port() -> None:
    """Update the Cassandra JMX port via a config update and verify that every
    node task is actually listening on the new port.

    The port value is defined once so that the config option, the `lsof`
    command, and the expected output cannot drift out of sync.
    """
    # Single source of truth for the port under test (was previously repeated
    # as a literal in three places).
    jmx_port = 7200
    expected_open_port = ":{} (LISTEN)".format(jmx_port)

    new_config = {"cassandra": {"jmx_port": jmx_port}}

    sdk_service.update_configuration(
        config.PACKAGE_NAME,
        config.get_foldered_service_name(),
        new_config,
        config.DEFAULT_TASK_COUNT,
    )
    sdk_plan.wait_for_completed_deployment(config.get_foldered_service_name())

    # Every node task must be listening on the configured JMX port.
    tasks = sdk_tasks.get_service_tasks(config.get_foldered_service_name(), "node")
    for task in tasks:
        _, stdout, _ = sdk_cmd.run_cli("task exec {} lsof -i :{}".format(task.id, jmx_port))
        assert expected_open_port in stdout
def test_custom_yaml_base64() -> None:
    """Apply a base64-encoded custom elasticsearch.yml block and verify both a
    scalar setting and an array-of-strings setting survive the YAML/Mustache
    rendering pipeline intact.
    """
    # Apply this custom YAML block as a base64-encoded string:
    # cluster:
    #   routing:
    #     allocation:
    #       node_initial_primaries_recoveries: 3
    # script.allowed_contexts: ["search", "update"]
    base64_elasticsearch_yml = "".join(
        [
            "Y2x1c3RlcjoKICByb3V0aW5nOgogICAgYWxsb2NhdGlvbjoKICAgICAgbm9kZV9pbml0aWFsX3By",
            "aW1hcmllc19yZWNvdmVyaWVzOiAzCnNjcmlwdC5hbGxvd2VkX2NvbnRleHRzOiBbInNlYXJjaCIs",
            "ICJ1cGRhdGUiXQ==",
        ]
    )

    sdk_service.update_configuration(
        package_name,
        service_name,
        {"elasticsearch": {"custom_elasticsearch_yml": base64_elasticsearch_yml}},
        current_expected_task_count,
    )

    # We're testing two things here:

    # 1. The default value for `cluster.routing.allocation.node_initial_primaries_recoveries` is 4.
    # Here we want to make sure that the end-result for the multiple YAML/Mustache compilation steps
    # results in a valid elasticsearch.yml file, with the correct setting value.
    config.check_custom_elasticsearch_cluster_setting(
        service_name, ["cluster", "routing", "allocation", "node_initial_primaries_recoveries"], "3"
    )

    # 2. `script.allowed_contexts` has an "array of strings" value defined in the custom YAML. Here
    # we're also making sure that the end-result for the multiple YAML/Mustache compilation steps
    # results in a valid elasticsearch.yml file, but with a trickier compilation case due to the
    # setting value being an array of strings.
    config.check_custom_elasticsearch_cluster_setting(
        service_name, ["script", "allowed_contexts"], ["search", "update"]
    )
def test_upgrade_from_xpack_enabled(
    package_name: str,
    service_name: str,
    options: Dict[str, Any],
    expected_task_count: int,
    from_version: str,
    to_version: str = "stub-universe",
) -> None:
    """Upgrade a cluster installed with ES5-style "X-Pack enabled" to an ES6
    version with the given `options`, performing the two manual migration
    steps (explicit password set, internal index upgrade) in between, and
    verify documents written at each stage remain readable.

    The order of the steps below is load-bearing; do not reorder them.
    """
    # This test needs to run some code in between the Universe version installation and the upgrade
    # to the 'stub-universe' version, so it cannot use `sdk_upgrade.test_upgrade`.
    http_user = DEFAULT_ELASTICSEARCH_USER
    http_password = DEFAULT_ELASTICSEARCH_PASSWORD

    sdk_install.uninstall(package_name, service_name)
    sdk_install.install(
        package_name,
        service_name,
        expected_running_tasks=expected_task_count,
        additional_options={"elasticsearch": {"xpack_enabled": True}},
        package_version=from_version,
    )

    # Document written before the upgrade; must still be readable at the end.
    document_es_5_id = 1
    document_es_5_fields = {"name": "Elasticsearch 5: X-Pack enabled", "role": "search engine"}
    create_document(
        DEFAULT_INDEX_NAME,
        DEFAULT_INDEX_TYPE,
        document_es_5_id,
        document_es_5_fields,
        service_name=service_name,
        http_user=http_user,
        http_password=http_password,
    )

    # This is the first crucial step when upgrading from "X-Pack enabled" on ES5 to "X-Pack security
    # enabled" on ES6. The default "changeme" password doesn't work anymore on ES6, so passwords
    # *must* be *explicitly* set, otherwise nodes won't authenticate requests, leaving the cluster
    # unavailable. Users will have to do this manually when upgrading.
    _curl_query(
        service_name,
        "POST",
        "_xpack/security/user/{}/_password".format(http_user),
        json_body={"password": http_password},
        http_user=http_user,
        http_password=http_password,
    )

    # First we upgrade to "X-Pack security enabled" set to false on ES6, so that we can use the
    # X-Pack migration assistance and upgrade APIs.
    sdk_upgrade.update_or_upgrade_or_downgrade(
        package_name,
        service_name,
        to_version,
        {
            "service": {"update_strategy": "parallel"},
            "elasticsearch": {"xpack_security_enabled": False},
        },
        expected_task_count,
    )

    # Get list of indices to upgrade from here. The response looks something like:
    # {
    #   "indices" : {
    #     ".security" : {
    #       "action_required" : "upgrade"
    #     },
    #     ".watches" : {
    #       "action_required" : "upgrade"
    #     }
    #   }
    # }
    response = _curl_query(service_name, "GET", "_xpack/migration/assistance?pretty")

    # This is the second crucial step when upgrading from "X-Pack enabled" on ES5 to ES6. The
    # ".security" index (along with any others returned by the "assistance" API) needs to be
    # upgraded.
    for index in response["indices"]:
        _curl_query(
            service_name,
            "POST",
            "_xpack/migration/upgrade/{}?pretty".format(index),
            http_user=http_user,
            http_password=http_password,
        )

    # Document written on ES6 with security still disabled.
    document_es_6_security_disabled_id = 2
    document_es_6_security_disabled_fields = {
        "name": "Elasticsearch 6: X-Pack security disabled",
        "role": "search engine",
    }
    create_document(
        DEFAULT_INDEX_NAME,
        DEFAULT_INDEX_TYPE,
        document_es_6_security_disabled_id,
        document_es_6_security_disabled_fields,
        service_name=service_name,
        http_user=http_user,
        http_password=http_password,
    )

    # After upgrading the indices, we're now safe to do the actual configuration update, possibly
    # enabling X-Pack security.
    sdk_service.update_configuration(package_name, service_name, options, expected_task_count)

    # Document written after the final configuration update.
    document_es_6_post_update_id = 3
    document_es_6_post_update_fields = {
        "name": "Elasticsearch 6: Post update",
        "role": "search engine",
    }
    create_document(
        DEFAULT_INDEX_NAME,
        DEFAULT_INDEX_TYPE,
        document_es_6_post_update_id,
        document_es_6_post_update_fields,
        service_name=service_name,
        http_user=http_user,
        http_password=http_password,
    )

    # Make sure that documents were created and are accessible.
    verify_document(
        service_name,
        document_es_5_id,
        document_es_5_fields,
        http_user=http_user,
        http_password=http_password,
    )
    verify_document(
        service_name,
        document_es_6_security_disabled_id,
        document_es_6_security_disabled_fields,
        http_user=http_user,
        http_password=http_password,
    )
    verify_document(
        service_name,
        document_es_6_post_update_id,
        document_es_6_post_update_fields,
        http_user=http_user,
        http_password=http_password,
    )
def test_security_toggle_with_kibana(default_populated_index: None) -> None:
    """Toggle X-Pack security on and then off again, installing Kibana in both
    modes, and verify documents written in either mode stay readable.

    Restores `update_strategy` to "serial" at the end so later tests see the
    default behavior.
    """
    # Verify that commercial APIs are disabled by default in Elasticsearch.
    config.verify_commercial_api_status(False, service_name=foldered_name)

    # Write some data with security disabled, enabled security, and afterwards verify that we can
    # still read what we wrote.
    document_security_disabled_id = 1
    document_security_disabled_fields = {"name": "Elasticsearch", "role": "search engine"}
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        document_security_disabled_id,
        document_security_disabled_fields,
        service_name=foldered_name,
    )

    # Verify that basic license is enabled by default.
    config.verify_xpack_license("basic", service_name=foldered_name)

    # Install Kibana.
    elasticsearch_url = "http://" + sdk_hosts.vip_host(foldered_name, "coordinator", 9200)
    sdk_install.install(
        config.KIBANA_PACKAGE_NAME,
        config.KIBANA_PACKAGE_NAME,
        0,
        {"kibana": {"elasticsearch_url": elasticsearch_url}},
        timeout_seconds=config.KIBANA_DEFAULT_TIMEOUT,
        wait_for_deployment=False,
        insert_strict_options=False,
    )

    # Verify that it works.
    config.check_kibana_adminrouter_integration("service/{}/".format(config.KIBANA_PACKAGE_NAME))

    # Uninstall it.
    sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)

    # Enable Elasticsearch security.
    sdk_service.update_configuration(
        config.PACKAGE_NAME,
        foldered_name,
        {
            "elasticsearch": {"xpack_security_enabled": True},
            "service": {"update_strategy": "parallel"},
        },
        current_expected_task_count,
    )

    # This should still be disabled.
    config.verify_commercial_api_status(False, service_name=foldered_name)

    # Start trial license.
    config.start_trial_license(service_name=foldered_name)

    # Set up passwords. Basic HTTP credentials will have to be used in HTTP requests to
    # Elasticsearch from now on.
    passwords = config.setup_passwords(foldered_name)

    # Verify trial license is working.
    config.verify_xpack_license(
        "trial",
        service_name=foldered_name,
        http_user=config.DEFAULT_ELASTICSEARCH_USER,
        http_password=passwords["elastic"],
    )
    config.verify_commercial_api_status(
        True,
        service_name=foldered_name,
        http_user=config.DEFAULT_ELASTICSEARCH_USER,
        http_password=passwords["elastic"],
    )

    # Write some data with security enabled, disable security, and afterwards verify that we can
    # still read what we wrote.
    document_security_enabled_id = 2
    document_security_enabled_fields = {"name": "X-Pack", "role": "commercial plugin"}
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        document_security_enabled_id,
        document_security_enabled_fields,
        service_name=foldered_name,
        http_user=config.DEFAULT_ELASTICSEARCH_USER,
        http_password=passwords["elastic"],
    )

    # Install Kibana with security enabled.
    sdk_install.install(
        config.KIBANA_PACKAGE_NAME,
        config.KIBANA_PACKAGE_NAME,
        0,
        {
            "kibana": {
                "elasticsearch_url": elasticsearch_url,
                "elasticsearch_xpack_security_enabled": True,
                "user": config.DEFAULT_KIBANA_USER,
                "password": passwords["kibana"],
            }
        },
        timeout_seconds=config.KIBANA_DEFAULT_TIMEOUT,
        wait_for_deployment=False,
        insert_strict_options=False,
    )

    # Verify that it works. Notice that with security enabled, one has to access
    # /service/kibana/login instead of /service/kibana.
    config.check_kibana_adminrouter_integration(
        "service/{}/login".format(config.KIBANA_PACKAGE_NAME)
    )

    # Uninstall it.
    sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)

    # Disable Elastic security.
    sdk_service.update_configuration(
        config.PACKAGE_NAME,
        foldered_name,
        {
            "elasticsearch": {"xpack_security_enabled": False},
            "service": {"update_strategy": "parallel"},
        },
        current_expected_task_count,
    )

    # Verify we can read what was written before toggling security, without basic HTTP credentials.
    document_security_disabled = config.get_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        document_security_disabled_id,
        service_name=foldered_name,
    )
    assert (
        document_security_disabled["_source"]["name"] == document_security_disabled_fields["name"]
    )

    # Verify we can read what was written when security was enabled, without basic HTTP credentials.
    document_security_enabled = config.get_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        document_security_enabled_id,
        service_name=foldered_name,
    )
    assert document_security_enabled["_source"]["name"] == document_security_enabled_fields["name"]

    # Set update_strategy back to serial.
    sdk_service.update_configuration(
        config.PACKAGE_NAME,
        foldered_name,
        {"service": {"update_strategy": "serial"}},
        current_expected_task_count,
    )
def test_upgrade_from_xpack_enabled_to_xpack_security_enabled() -> None:
    """Upgrade from the Universe version with ES5-style "X-Pack enabled" to the
    version under test with "X-Pack security enabled", performing the two
    manual migration steps (explicit password set, internal index upgrade) in
    between, and verify documents written at each stage remain readable.

    Resets and then mutates the module-level `current_expected_task_count`.
    The order of the steps below is load-bearing; do not reorder them.
    """
    # Since this test uninstalls the Elastic service that is shared between all previous tests,
    # reset the number of expected tasks to the default value. This is checked before all tests
    # by the `pre_test_setup` fixture.
    global current_expected_task_count
    current_expected_task_count = config.DEFAULT_TASK_COUNT

    # This test needs to run some code in between the Universe version installation and the stub Universe
    # upgrade, so it cannot use `sdk_upgrade.test_upgrade`.
    log.info("Updating from X-Pack 'enabled' to X-Pack security 'enabled'")
    http_user = config.DEFAULT_ELASTICSEARCH_USER
    http_password = config.DEFAULT_ELASTICSEARCH_PASSWORD
    package_name = config.PACKAGE_NAME

    sdk_install.uninstall(package_name, foldered_name)

    # Move Universe repo to the top of the repo list so that we can first install the Universe
    # version.
    _, universe_version = sdk_repository.move_universe_repo(package_name, universe_repo_index=0)

    sdk_install.install(
        package_name,
        foldered_name,
        expected_running_tasks=current_expected_task_count,
        additional_options={"elasticsearch": {"xpack_enabled": True}},
        package_version=universe_version,
    )

    # Document written before the upgrade; must still be readable at the end.
    document_es_5_id = 1
    document_es_5_fields = {"name": "Elasticsearch 5: X-Pack enabled", "role": "search engine"}
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        document_es_5_id,
        document_es_5_fields,
        service_name=foldered_name,
        http_user=http_user,
        http_password=http_password,
    )

    # This is the first crucial step when upgrading from "X-Pack enabled" on ES5 to "X-Pack security
    # enabled" on ES6. The default "changeme" password doesn't work anymore on ES6, so passwords
    # *must* be *explicitly* set, otherwise nodes won't authenticate requests, leaving the cluster
    # unavailable. Users will have to do this manually when upgrading.
    config._curl_query(
        foldered_name,
        "POST",
        "_xpack/security/user/{}/_password".format(http_user),
        json_body={"password": http_password},
        http_user=http_user,
        http_password=http_password,
    )

    # Move Universe repo back to the bottom of the repo list so that we can upgrade to the version
    # under test.
    _, test_version = sdk_repository.move_universe_repo(package_name)

    # First we upgrade to "X-Pack security enabled" set to false on ES6, so that we can use the
    # X-Pack migration assistance and upgrade APIs.
    sdk_upgrade.update_or_upgrade_or_downgrade(
        package_name,
        foldered_name,
        test_version,
        {
            "service": {"update_strategy": "parallel"},
            "elasticsearch": {"xpack_security_enabled": False},
        },
        current_expected_task_count,
    )

    # Get list of indices to upgrade from here. The response looks something like:
    # {
    #   "indices" : {
    #     ".security" : {
    #       "action_required" : "upgrade"
    #     },
    #     ".watches" : {
    #       "action_required" : "upgrade"
    #     }
    #   }
    # }
    response = config._curl_query(foldered_name, "GET", "_xpack/migration/assistance?pretty")

    # This is the second crucial step when upgrading from "X-Pack enabled" on ES5 to "X-Pack
    # security enabled" on ES6. The ".security" index (along with any others returned by the
    # "assistance" API) needs to be upgraded.
    for index in response["indices"]:
        config._curl_query(
            foldered_name,
            "POST",
            "_xpack/migration/upgrade/{}?pretty".format(index),
            http_user=http_user,
            http_password=http_password,
        )

    # Document written on ES6 with security still disabled.
    document_es_6_security_disabled_id = 2
    document_es_6_security_disabled_fields = {
        "name": "Elasticsearch 6: X-Pack security disabled",
        "role": "search engine",
    }
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        document_es_6_security_disabled_id,
        document_es_6_security_disabled_fields,
        service_name=foldered_name,
        http_user=http_user,
        http_password=http_password,
    )

    # After upgrading the indices, we're now safe to enable X-Pack security.
    sdk_service.update_configuration(
        package_name,
        foldered_name,
        {"elasticsearch": {"xpack_security_enabled": True}},
        current_expected_task_count,
    )

    # Document written with security fully enabled.
    document_es_6_security_enabled_id = 3
    document_es_6_security_enabled_fields = {
        "name": "Elasticsearch 6: X-Pack security enabled",
        "role": "search engine",
    }
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        document_es_6_security_enabled_id,
        document_es_6_security_enabled_fields,
        service_name=foldered_name,
        http_user=http_user,
        http_password=http_password,
    )

    # Make sure that documents were created and are accessible.
    config.verify_document(
        foldered_name,
        document_es_5_id,
        document_es_5_fields,
        http_user=http_user,
        http_password=http_password,
    )
    config.verify_document(
        foldered_name,
        document_es_6_security_disabled_id,
        document_es_6_security_disabled_fields,
        http_user=http_user,
        http_password=http_password,
    )
    config.verify_document(
        foldered_name,
        document_es_6_security_enabled_id,
        document_es_6_security_enabled_fields,
        http_user=http_user,
        http_password=http_password,
    )
def test_security_toggle_with_kibana(default_populated_index: None) -> None:
    """Toggle X-Pack security on and off while exercising Kibana integration.

    Verifies that documents written with security disabled remain readable after
    enabling (and later re-disabling) security, and that Kibana can be installed
    against the cluster in both modes.

    FIX: the original passed ``kibana_package_name`` as the *service* name to
    the second ``sdk_install.install`` and to both ``sdk_install.uninstall``
    calls, even though the first install registered the service as
    ``kibana_service_name``. All Kibana install/uninstall calls now consistently
    use ``kibana_service_name`` for the service-name argument.
    """
    http_user = config.DEFAULT_ELASTICSEARCH_USER

    # Verify that commercial APIs are disabled by default in Elasticsearch.
    config.verify_commercial_api_status(False, service_name=service_name)

    # Write some data with security disabled, enabled security, and afterwards verify that we can
    # still read what we wrote.
    document_security_disabled_id = 1
    document_security_disabled_fields = {"name": "Elasticsearch", "role": "search engine"}
    config.create_document(
        index_name,
        index_type,
        document_security_disabled_id,
        document_security_disabled_fields,
        service_name=service_name,
    )

    # Verify that basic license is enabled by default.
    config.verify_xpack_license("basic", service_name=service_name)

    # Install Kibana.
    elasticsearch_url = "http://" + sdk_hosts.vip_host(service_name, "coordinator", 9200)
    sdk_install.install(
        kibana_package_name,
        kibana_service_name,
        0,
        {"kibana": {"elasticsearch_url": elasticsearch_url}},
        timeout_seconds=kibana_timeout,
        wait_for_deployment=False,
        insert_strict_options=False,
    )

    # Verify that it works.
    config.check_kibana_adminrouter_integration("service/{}/app/kibana".format(kibana_package_name))

    # Uninstall it. (Fixed: service name is `kibana_service_name`, matching the install above.)
    sdk_install.uninstall(kibana_package_name, kibana_service_name)

    # Enable Elasticsearch security.
    sdk_service.update_configuration(
        package_name,
        service_name,
        {
            "elasticsearch": {"xpack_security_enabled": True},
            "service": {"update_strategy": "parallel"},
        },
        current_expected_task_count,
    )

    # This should still be disabled.
    config.verify_commercial_api_status(False, service_name=service_name)

    # Start trial license.
    config.start_trial_license(service_name=service_name)

    # Set up passwords. Basic HTTP credentials will have to be used in HTTP requests to
    # Elasticsearch from now on.
    passwords = config.setup_passwords(service_name)

    # Verify trial license is working.
    config.verify_xpack_license(
        "trial", service_name=service_name, http_user=http_user, http_password=passwords["elastic"]
    )
    config.verify_commercial_api_status(
        True, service_name=service_name, http_user=http_user, http_password=passwords["elastic"]
    )

    # Write some data with security enabled, disable security, and afterwards verify that we can
    # still read what we wrote.
    document_security_enabled_id = 2
    document_security_enabled_fields = {"name": "X-Pack", "role": "commercial plugin"}
    config.create_document(
        index_name,
        index_type,
        document_security_enabled_id,
        document_security_enabled_fields,
        service_name=service_name,
        http_user=http_user,
        http_password=passwords["elastic"],
    )

    # Install Kibana with security enabled.
    # (Fixed: service name is `kibana_service_name`, consistent with the first install.)
    sdk_install.install(
        kibana_package_name,
        kibana_service_name,
        0,
        {
            "kibana": {
                "elasticsearch_url": elasticsearch_url,
                "elasticsearch_xpack_security_enabled": True,
                "user": config.DEFAULT_KIBANA_USER,
                "password": passwords["kibana"],
            }
        },
        timeout_seconds=kibana_timeout,
        wait_for_deployment=False,
        insert_strict_options=False,
    )

    # Verify that it works. Notice that with security enabled, one has to access
    # /service/kibana/login instead of /service/kibana.
    config.check_kibana_adminrouter_integration("service/{}/login".format(kibana_package_name))

    # Uninstall it. (Fixed: service name is `kibana_service_name`.)
    sdk_install.uninstall(kibana_package_name, kibana_service_name)

    # Disable Elastic security.
    sdk_service.update_configuration(
        package_name,
        service_name,
        {
            "elasticsearch": {"xpack_security_enabled": False},
            "service": {"update_strategy": "parallel"},
        },
        current_expected_task_count,
    )

    # Verify we can read what was written before toggling security, without basic HTTP credentials.
    document_security_disabled = config.get_document(
        index_name, index_type, document_security_disabled_id, service_name=service_name
    )
    assert (
        document_security_disabled["_source"]["name"] == document_security_disabled_fields["name"]
    )

    # Verify we can read what was written when security was enabled, without basic HTTP credentials.
    document_security_enabled = config.get_document(
        index_name, index_type, document_security_enabled_id, service_name=service_name
    )
    assert document_security_enabled["_source"]["name"] == document_security_enabled_fields["name"]

    # Set update_strategy back to serial.
    sdk_service.update_configuration(
        package_name,
        service_name,
        {"service": {"update_strategy": "serial"}},
        current_expected_task_count,
    )
def test_upgrade_from_xpack_enabled(
    package_name: str,
    service_name: str,
    options: Dict[str, Any],
    expected_task_count: int,
    from_version: str,
    to_version: str = "stub-universe",
) -> None:
    """Upgrade a cluster from ES5 "X-Pack enabled" to ES6, then apply `options`.

    Installs `from_version` with `xpack_enabled: True`, performs the two manual
    steps required by the ES5 -> ES6 X-Pack migration (explicit password reset,
    then per-index upgrade via the migration API), upgrades to `to_version`, and
    finally applies `options` via a configuration update. Documents are written
    at each stage and verified at the end to prove no data was lost.
    """
    # This test needs to run some code in between the Universe version installation and the upgrade
    # to the 'stub-universe' version, so it cannot use `sdk_upgrade.test_upgrade`.
    http_user = DEFAULT_ELASTICSEARCH_USER
    http_password = DEFAULT_ELASTICSEARCH_PASSWORD

    # Start from a clean slate, then install the old (pre-upgrade) version.
    sdk_install.uninstall(package_name, service_name)
    sdk_install.install(
        package_name,
        service_name,
        expected_running_tasks=expected_task_count,
        additional_options={"elasticsearch": {"xpack_enabled": True}},
        package_version=from_version,
    )

    # Write a document while still on ES5 with X-Pack enabled.
    document_es_5_id = 1
    document_es_5_fields = {
        "name": "Elasticsearch 5: X-Pack enabled",
        "role": "search engine",
    }
    create_document(
        DEFAULT_INDEX_NAME,
        DEFAULT_INDEX_TYPE,
        document_es_5_id,
        document_es_5_fields,
        service_name=service_name,
        http_user=http_user,
        http_password=http_password,
    )

    # This is the first crucial step when upgrading from "X-Pack enabled" on ES5 to "X-Pack security
    # enabled" on ES6. The default "changeme" password doesn't work anymore on ES6, so passwords
    # *must* be *explicitly* set, otherwise nodes won't authenticate requests, leaving the cluster
    # unavailable. Users will have to do this manually when upgrading.
    _curl_query(
        service_name,
        "POST",
        "_xpack/security/user/{}/_password".format(http_user),
        json_body={"password": http_password},
        http_user=http_user,
        http_password=http_password,
    )

    # First we upgrade to "X-Pack security enabled" set to false on ES6, so that we can use the
    # X-Pack migration assistance and upgrade APIs.
    sdk_upgrade.update_or_upgrade_or_downgrade(
        package_name,
        service_name,
        to_version,
        {
            "service": {"update_strategy": "parallel"},
            "elasticsearch": {"xpack_security_enabled": False},
        },
        expected_task_count,
    )

    # Get list of indices to upgrade from here. The response looks something like:
    # {
    #   "indices" : {
    #     ".security" : {
    #       "action_required" : "upgrade"
    #     },
    #     ".watches" : {
    #       "action_required" : "upgrade"
    #     }
    #   }
    # }
    response = _curl_query(service_name, "GET", "_xpack/migration/assistance?pretty")

    # This is the second crucial step when upgrading from "X-Pack enabled" on ES5 to ES6. The
    # ".security" index (along with any others returned by the "assistance" API) needs to be
    # upgraded.
    for index in response["indices"]:
        _curl_query(
            service_name,
            "POST",
            "_xpack/migration/upgrade/{}?pretty".format(index),
            http_user=http_user,
            http_password=http_password,
        )

    # Write a second document on ES6 while security is still disabled.
    document_es_6_security_disabled_id = 2
    document_es_6_security_disabled_fields = {
        "name": "Elasticsearch 6: X-Pack security disabled",
        "role": "search engine",
    }
    create_document(
        DEFAULT_INDEX_NAME,
        DEFAULT_INDEX_TYPE,
        document_es_6_security_disabled_id,
        document_es_6_security_disabled_fields,
        service_name=service_name,
        http_user=http_user,
        http_password=http_password,
    )

    # After upgrading the indices, we're now safe to do the actual configuration update, possibly
    # enabling X-Pack security.
    sdk_service.update_configuration(package_name, service_name, options, expected_task_count)

    # Write a third document after the final configuration update.
    document_es_6_post_update_id = 3
    document_es_6_post_update_fields = {
        "name": "Elasticsearch 6: Post update",
        "role": "search engine",
    }
    create_document(
        DEFAULT_INDEX_NAME,
        DEFAULT_INDEX_TYPE,
        document_es_6_post_update_id,
        document_es_6_post_update_fields,
        service_name=service_name,
        http_user=http_user,
        http_password=http_password,
    )

    # Make sure that documents were created and are accessible.
    verify_document(
        service_name,
        document_es_5_id,
        document_es_5_fields,
        http_user=http_user,
        http_password=http_password,
    )
    verify_document(
        service_name,
        document_es_6_security_disabled_id,
        document_es_6_security_disabled_fields,
        http_user=http_user,
        http_password=http_password,
    )
    verify_document(
        service_name,
        document_es_6_post_update_id,
        document_es_6_post_update_fields,
        http_user=http_user,
        http_password=http_password,
    )
def test_security_toggle_with_kibana() -> None:
    """Toggle X-Pack security (with transport encryption) and verify Kibana.

    Installs Elasticsearch with a service account, writes a document with
    security off, enables security + TLS, writes another document, then
    disables security again and verifies both documents are still readable
    without credentials. Kibana is installed/uninstalled in both modes.

    FIX: two calls had their (package_name, service_name) arguments swapped —
    the first Kibana uninstall and the security-enabled Kibana install — which
    contradicted every other install/uninstall call in this function (including
    the `finally` cleanup). Also, `service_account_info` is now guarded so the
    `finally` block cannot raise NameError if setup fails before it is bound.
    """
    # Pre-bind so the `finally` cleanup can tell whether setup ever ran.
    service_account_info = None
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_SERVICE_NAME)

        service_account_info = transport_encryption.setup_service_account(config.SERVICE_NAME)

        sdk_install.install(
            config.PACKAGE_NAME,
            service_name=config.SERVICE_NAME,
            expected_running_tasks=config.DEFAULT_TASK_COUNT,
            additional_options={
                "service": {
                    "name": config.SERVICE_NAME,
                    "service_account": service_account_info["name"],
                    "service_account_secret": service_account_info["secret"],
                }
            },
            timeout_seconds=30 * 60,
            wait_for_deployment=True,
        )

        # Write some data with security disabled, enabled security, and afterwards verify that we
        # can still read what we wrote.
        document_security_disabled_id = 1
        document_security_disabled_fields = {"name": "Elasticsearch", "role": "search engine"}
        config.create_document(
            config.DEFAULT_INDEX_NAME,
            config.DEFAULT_INDEX_TYPE,
            document_security_disabled_id,
            document_security_disabled_fields,
            service_name=config.SERVICE_NAME,
        )

        # Install Kibana.
        coordinator_host = sdk_hosts.vip_host(config.SERVICE_NAME, "coordinator", 9200)
        sdk_install.install(
            config.KIBANA_PACKAGE_NAME,
            config.KIBANA_SERVICE_NAME,
            0,
            {"kibana": {"elasticsearch_url": "http://" + coordinator_host}},
            timeout_seconds=config.KIBANA_DEFAULT_TIMEOUT,
            wait_for_deployment=False,
            insert_strict_options=False,
        )

        # Verify that it works.
        config.check_kibana_adminrouter_integration(
            "service/{}/".format(config.KIBANA_PACKAGE_NAME)
        )
        config.check_kibana_adminrouter_integration(
            "service/{}/app/kibana".format(config.KIBANA_PACKAGE_NAME)
        )

        # Uninstall it. (Fixed: arguments were swapped — package name comes first.)
        sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_SERVICE_NAME)

        # Enable Elasticsearch security.
        sdk_service.update_configuration(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            {
                "elasticsearch": {"xpack_security_enabled": True},
                "service": {
                    "update_strategy": "parallel",
                    "security": {"transport_encryption": {"enabled": True}},
                },
            },
            config.DEFAULT_TASK_COUNT,
            wait_for_deployment=False,
        )

        # Set up passwords. Basic HTTP credentials will have to be used in HTTP requests to
        # Elasticsearch from now on.
        passwords = config.setup_passwords(config.SERVICE_NAME, https=True)

        # Write some data with security enabled, disable security, and afterwards verify that we
        # can still read what we wrote.
        document_security_enabled_id = 2
        document_security_enabled_fields = {"name": "X-Pack", "role": "commercial plugin"}
        config.create_document(
            config.DEFAULT_INDEX_NAME,
            config.DEFAULT_INDEX_TYPE,
            document_security_enabled_id,
            document_security_enabled_fields,
            service_name=config.SERVICE_NAME,
            https=True,
            http_user=config.DEFAULT_ELASTICSEARCH_USER,
            http_password=passwords["elastic"],
        )

        # Install Kibana with security enabled.
        # (Fixed: arguments were swapped — package name comes first.)
        sdk_install.install(
            config.KIBANA_PACKAGE_NAME,
            config.KIBANA_SERVICE_NAME,
            0,
            {
                "service": {"name": config.KIBANA_SERVICE_NAME},
                "kibana": {
                    "elasticsearch_tls": True,
                    "elasticsearch_url": "https://" + coordinator_host,
                    "elasticsearch_xpack_security_enabled": True,
                    "user": config.DEFAULT_KIBANA_USER,
                    "password": passwords["kibana"],
                },
            },
            timeout_seconds=config.KIBANA_DEFAULT_TIMEOUT,
            wait_for_deployment=False,
            insert_strict_options=False,
        )

        # Verify that it works.
        config.check_kibana_adminrouter_integration(
            "service/{}/".format(config.KIBANA_PACKAGE_NAME)
        )
        config.check_kibana_adminrouter_integration(
            "service/{}/login".format(config.KIBANA_PACKAGE_NAME)
        )

        # Uninstall it.
        sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_SERVICE_NAME)

        # Disable Elastic security.
        sdk_service.update_configuration(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            {
                "elasticsearch": {"xpack_security_enabled": False},
                "service": {
                    "update_strategy": "parallel",
                    "security": {"transport_encryption": {"enabled": False}},
                },
            },
            config.DEFAULT_TASK_COUNT,
            wait_for_deployment=True,
        )

        # Verify we can read what was written before toggling security, without basic HTTP
        # credentials.
        document_security_disabled = config.get_document(
            config.DEFAULT_INDEX_NAME,
            config.DEFAULT_INDEX_TYPE,
            document_security_disabled_id,
            service_name=config.SERVICE_NAME,
        )
        assert (
            document_security_disabled["_source"]["name"]
            == document_security_disabled_fields["name"]
        )

        # Verify we can read what was written when security was enabled, without basic HTTP
        # credentials.
        document_security_enabled = config.get_document(
            config.DEFAULT_INDEX_NAME,
            config.DEFAULT_INDEX_TYPE,
            document_security_enabled_id,
            service_name=config.SERVICE_NAME,
        )
        assert (
            document_security_enabled["_source"]["name"]
            == document_security_enabled_fields["name"]
        )
    finally:
        sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_SERVICE_NAME)
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        # Only clean up the service account if setup actually created one.
        if service_account_info is not None:
            transport_encryption.cleanup_service_account(config.SERVICE_NAME, service_account_info)