def test_disable_quota_role():
    """Disable role migration and verify the service runs purely under the legacy role."""
    # Add new pods to the service; with migration disabled they should come up
    # under the legacy role only.
    legacy_only_options = {
        "service": {
            "name": SERVICE_NAME,
            "role": "slave_public",
            "enable_role_migration": False,
        }
    }
    sdk_upgrade.update_or_upgrade_or_downgrade(
        config.PACKAGE_NAME,
        SERVICE_NAME,
        expected_running_tasks=3,
        to_options=legacy_only_options,
        to_version=None,
    )

    # Fetch the current role assignments from the scheduler.
    roles = sdk_utils.get_service_roles(SERVICE_NAME)
    task_roles = roles["task-roles"]

    # Every task must carry a role, and all three must be on the legacy role.
    assert len(task_roles) > 0
    assert len(task_roles) == 3
    assert LEGACY_ROLE in task_roles.values()
    assert ENFORCED_ROLE not in task_roles.values()

    # Not MULTI_ROLE: the framework registers with the single legacy role only.
    assert roles["framework-roles"] is None
    assert roles["framework-role"] == LEGACY_ROLE
def test_upgrade(
    package_name: str,
    service_name: str,
    expected_running_tasks_before_upgrade: int,
    expected_running_tasks_after_upgrade: int,
    from_version: Optional[str] = None,
    from_options: Optional[Dict[str, Any]] = None,
    to_version: Optional[str] = None,
    to_options: Optional[Dict[str, Any]] = None,
    timeout_seconds: int = sdk_upgrade.TIMEOUT_SECONDS,
    wait_for_deployment: bool = True,
) -> None:
    """Install a released ("universe") version of a package, then upgrade it to the build
    ("test") version.

    Args:
        package_name: Name of the package to install/upgrade.
        service_name: Name of the service instance.
        expected_running_tasks_before_upgrade: Task count to wait for after the initial install.
        expected_running_tasks_after_upgrade: Task count to wait for after the upgrade.
        from_version: Version to install first; defaults to the latest Universe release.
        from_options: Install-time service options; defaults to no extra options.
        to_version: Version to upgrade to; defaults to the build ("test") version.
        to_options: Upgrade-time service options; falls back to `from_options` when omitted.
        timeout_seconds: Deadline for install/upgrade deployment.
        wait_for_deployment: Whether to block until deployment completes.
    """
    # Normalize here instead of using `{}` as a default value in the signature,
    # which would be a shared mutable default.
    if from_options is None:
        from_options = {}

    sdk_install.uninstall(package_name, service_name)

    log.info("Called with 'from' version '%s' and 'to' version '%s'", from_version, to_version)

    universe_version = None
    try:
        # Move the Universe repo to the top of the repo list so that we can first install the latest
        # released version.
        test_version, universe_version = sdk_repository.move_universe_repo(
            package_name, universe_repo_index=0
        )
        log.info("Found 'test' version: %s", test_version)
        log.info("Found 'universe' version: %s", universe_version)

        from_version = from_version or universe_version
        to_version = to_version or test_version

        log.info(
            "Will upgrade %s from version '%s' to '%s'", package_name, from_version, to_version
        )

        log.info("Installing %s 'from' version: %s", package_name, from_version)
        sdk_install.install(
            package_name,
            service_name,
            expected_running_tasks_before_upgrade,
            package_version=from_version,
            additional_options=from_options,
            timeout_seconds=timeout_seconds,
            wait_for_deployment=wait_for_deployment,
        )
    finally:
        if universe_version:
            # Return the Universe repo back to the bottom of the repo list so that we can upgrade to
            # the build version.
            sdk_repository.move_universe_repo(package_name)

    log.info("Upgrading %s from version '%s' to '%s'", package_name, from_version, to_version)
    sdk_upgrade.update_or_upgrade_or_downgrade(
        package_name,
        service_name,
        to_version,
        to_options or from_options,
        expected_running_tasks_after_upgrade,
        wait_for_deployment,
        timeout_seconds,
    )
def test_envvar_accross_restarts():
    """Verify CONFIG_SLEEP_DURATION keeps its configured value across repeated pod restarts."""
    expected_sleep = 9999
    sdk_upgrade.update_or_upgrade_or_downgrade(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        to_package_version=None,
        additional_options={
            "service": {"name": config.SERVICE_NAME, "sleep": expected_sleep, "yaml": "sidecar"}
        },
        expected_running_tasks=2,
        wait_for_deployment=True,
    )

    for attempt in range(3):
        # Restart the pod and wait for the recovery plan to run to completion.
        sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "pod restart hello-0")
        sdk_plan.wait_for_kicked_off_recovery(config.SERVICE_NAME)
        sdk_plan.wait_for_completed_recovery(config.SERVICE_NAME)

        # Dump the task environment and locate the variable by its "NAME=" prefix.
        _, stdout, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, "hello-0-server", "env")
        prefix = "CONFIG_SLEEP_DURATION="
        pos = stdout.find(prefix)
        if pos < 0:
            raise Exception("Required envvar not found")
        value_start = pos + len(prefix)
        if not stdout[value_start:].startswith("{}".format(expected_sleep)):
            snippet = stdout[value_start : value_start + 15]
            log.error(
                "(%d) Looking for %s%d but found: %s", attempt, prefix, expected_sleep, snippet
            )
            raise Exception("Envvar not set to required value")
def test_switch_to_legacy_role():
    """Point the scheduler back at the legacy role (migration enabled) and check role layout."""
    # NOTE(review): despite the test name, the assertions below expect tasks to remain on the
    # enforced role — presumably pods were replaced by an earlier test in this module; verify
    # against the test ordering.
    update_options = {
        "service": {
            "name": SERVICE_NAME,
            "role": "slave_public",
            "enable_role_migration": True,
        }
    }
    sdk_upgrade.update_or_upgrade_or_downgrade(
        config.PACKAGE_NAME,
        SERVICE_NAME,
        expected_running_tasks=3,
        to_options=update_options,
        to_version=None,
    )

    # Inspect the roles the service is currently using.
    roles = sdk_utils.get_service_roles(SERVICE_NAME)
    task_roles = roles["task-roles"]

    # We must have some role!
    assert len(task_roles) > 0

    # Tasks stay on the enforced role; none revert to the legacy role.
    assert LEGACY_ROLE not in task_roles.values()
    assert ENFORCED_ROLE in task_roles.values()

    # The framework is MULTI_ROLE: registered with both roles rather than a single one.
    assert roles["framework-roles"] is not None
    assert roles["framework-role"] is None
    assert len(roles["framework-roles"]) == 2
    assert LEGACY_ROLE in roles["framework-roles"]
    assert ENFORCED_ROLE in roles["framework-roles"]
def test_update_scheduler_role():
    """Move the scheduler to the enforced role via migration, leaving existing pods untouched."""
    enforced_options = {
        "service": {"name": SERVICE_NAME, "role": ENFORCED_ROLE, "enable_role_migration": True}
    }
    sdk_upgrade.update_or_upgrade_or_downgrade(
        config.PACKAGE_NAME,
        SERVICE_NAME,
        expected_running_tasks=3,
        to_options=enforced_options,
        to_version=None,
    )

    # Read back the roles currently in effect.
    roles = sdk_utils.get_service_roles(SERVICE_NAME)
    task_roles = roles["task-roles"]

    # We must have some role!
    assert len(task_roles) > 0

    # Pods haven't been replaced yet, so tasks still run under the legacy role.
    assert LEGACY_ROLE in task_roles.values()
    assert ENFORCED_ROLE not in task_roles.values()

    # The scheduler itself is now MULTI_ROLE: registered with both roles at once.
    assert roles["framework-roles"] is not None
    assert roles["framework-role"] is None
    assert len(roles["framework-roles"]) == 2
    assert LEGACY_ROLE in roles["framework-roles"]
    assert ENFORCED_ROLE in roles["framework-roles"]
def test_old_tasks_get_relaunched_with_new_config():
    """Switching to multi-service mode with a changed pod config must relaunch the old task."""
    old_task_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, "hello")
    assert len(old_task_ids) > 0, "Got an empty list of task_ids"

    # Kick off an update whose options list multiple yaml files (multi-service mode)
    # together with an updated "hello" pod config.
    sdk_upgrade.update_or_upgrade_or_downgrade(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        to_version=None,
        to_options={
            "service": {"yaml": "", "yamls": "svc,foobar_service_name"},
            "hello": {"cpus": 0.2},
        },
        expected_running_tasks=4,
        wait_for_deployment=False,
    )

    # The old task DOES relaunch because its config changed; the "foobar" service deploys too.
    sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME, multiservice_name="foobar")
    sdk_tasks.check_task_relaunched("hello-0-server", old_task_ids.pop())
    assert len(sdk_tasks.get_task_ids(config.SERVICE_NAME, "foo")) == 1
def test_old_tasks_not_relaunched():
    """Switching to multi-service mode without pod config changes must not relaunch old tasks."""
    old_task_ids = sdk_tasks.get_task_ids(config.SERVICE_NAME, "hello")
    assert len(old_task_ids) > 0, "Got an empty list of task_ids"

    # Kick off an update whose options list multiple yaml files, launching multi-service mode.
    sdk_upgrade.update_or_upgrade_or_downgrade(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        to_package_version=None,
        additional_options={"service": {"yaml": "", "yamls": "svc,foobar_service_name"}},
        expected_running_tasks=4,
        wait_for_deployment=False,
    )

    # New tasks come up under the "foobar" service while the old task stays put.
    sdk_plan.wait_for_completed_deployment(config.SERVICE_NAME, multiservice_name="foobar")
    sdk_tasks.check_task_not_relaunched(
        config.SERVICE_NAME,
        "hello-0-server",
        old_task_ids.pop(),
        multiservice_name=config.SERVICE_NAME,
    )
    assert len(sdk_tasks.get_task_ids(config.SERVICE_NAME, "foo")) == 1
def test_envvar_accross_restarts():
    """CONFIG_SLEEP_DURATION must hold its configured value after an update and after a restart."""

    class ConfigException(Exception):
        pass

    def assert_envvar_has_value(envvar: str, expected_value: str):
        # Dump the task environment and parse it into a name -> value mapping.
        _, stdout, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, "hello-0-server", "env")
        env = {}
        for line in stdout.strip().split("\n"):
            name, value = line.strip().split("=", 1)
            env[name] = value
        val = env.get(envvar, "absent")
        if val == "absent":
            raise ConfigException("Required envvar not found")
        if val != expected_value:
            log.error("Looking for %s=%s but found: %s", envvar, expected_value, val)
            raise ConfigException("Envvar not set to required value")
        log.info("%s has expected value %s", envvar, expected_value)

    envvar = "CONFIG_SLEEP_DURATION"
    sleep_duration = 9999

    # Before the update the variable may hold any value; a mismatch here is expected.
    try:
        assert_envvar_has_value(envvar, str(sleep_duration))
    except ConfigException:
        log.debug("%s is set to something other than %d as expected", envvar, sleep_duration)

    sdk_upgrade.update_or_upgrade_or_downgrade(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        to_version=None,
        to_options={
            "service": {"name": config.SERVICE_NAME, "sleep": sleep_duration, "yaml": "sidecar"}
        },
        expected_running_tasks=2,
        wait_for_deployment=True,
    )

    log.info("Checking after update")
    assert_envvar_has_value(envvar, str(sleep_duration))

    # Restart the pod and wait for recovery, then re-check the value survived.
    sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "pod restart hello-0")
    sdk_plan.wait_for_kicked_off_recovery(config.SERVICE_NAME)
    sdk_plan.wait_for_completed_recovery(config.SERVICE_NAME)

    log.info("Checking after restart")
    assert_envvar_has_value(envvar, str(sleep_duration))
def test_envvar_accross_restarts():
    """Ensure the configured sleep duration is exported to the task env and survives restarts."""

    class ConfigException(Exception):
        pass

    def check_env_value(name: str, wanted: str):
        # Read the full environment of the task and look up the variable.
        _, stdout, _ = sdk_cmd.service_task_exec(config.SERVICE_NAME, "hello-0-server", "env")
        parsed = dict(entry.strip().split("=", 1) for entry in stdout.strip().split("\n"))
        actual = parsed.get(name, "absent")
        if actual == "absent":
            raise ConfigException("Required envvar not found")
        if actual != wanted:
            log.error("Looking for %s=%s but found: %s", name, wanted, actual)
            raise ConfigException("Envvar not set to required value")
        log.info("%s has expected value %s", name, wanted)

    envvar = "CONFIG_SLEEP_DURATION"
    sleep_duration = 9999

    # The pre-update value is allowed to differ; only log when it does.
    try:
        check_env_value(envvar, str(sleep_duration))
    except ConfigException:
        log.debug("%s is set to something other than %d as expected", envvar, sleep_duration)

    sdk_upgrade.update_or_upgrade_or_downgrade(
        config.PACKAGE_NAME,
        config.SERVICE_NAME,
        to_version=None,
        to_options={
            "service": {"name": config.SERVICE_NAME, "sleep": sleep_duration, "yaml": "sidecar"}
        },
        expected_running_tasks=2,
        wait_for_deployment=True,
    )

    log.info("Checking after update")
    check_env_value(envvar, str(sleep_duration))

    # Bounce the pod, wait for recovery to finish, then verify the value again.
    sdk_cmd.svc_cli(config.PACKAGE_NAME, config.SERVICE_NAME, "pod restart hello-0")
    sdk_plan.wait_for_kicked_off_recovery(config.SERVICE_NAME)
    sdk_plan.wait_for_completed_recovery(config.SERVICE_NAME)

    log.info("Checking after restart")
    check_env_value(envvar, str(sleep_duration))
def update_configuration(
    package_name: str,
    service_name: str,
    configuration: dict,
    expected_task_count: int,
    wait_for_deployment: bool = True,
    timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS,
) -> None:
    """Apply a configuration-only update to a running service.

    Passing ``None`` as the target version keeps the service on its current package
    version, so only the service options change.

    Args:
        package_name: Name of the installed package.
        service_name: Name of the service instance to update.
        configuration: New service options to apply.
        expected_task_count: Task count to wait for after the update.
        wait_for_deployment: Whether to block until the deploy plan completes.
        timeout_seconds: Deadline for the update to finish.
    """
    sdk_upgrade.update_or_upgrade_or_downgrade(
        package_name,
        service_name,
        None,  # to_version=None: stay on the current package version.
        configuration,
        expected_task_count,
        wait_for_deployment=wait_for_deployment,
        timeout_seconds=timeout_seconds,
    )
def update_configuration(
    package_name: str,
    service_name: str,
    configuration: Dict[str, Any],
    expected_task_count: int,
    wait_for_deployment: bool = True,
    timeout_seconds: int = DEFAULT_TIMEOUT_SECONDS,
) -> None:
    """Push a config-only update: same package version (to_version=None), new options."""
    update_kwargs = dict(
        package_name=package_name,
        service_name=service_name,
        to_version=None,
        to_options=configuration,
        expected_running_tasks=expected_task_count,
        wait_for_deployment=wait_for_deployment,
        timeout_seconds=timeout_seconds,
    )
    sdk_upgrade.update_or_upgrade_or_downgrade(**update_kwargs)
def test_config_update_across_restart():
    """The Cassandra batch-size threshold must persist in the node env across restarts."""
    service_name = config.get_foldered_service_name()
    threshold_kb = 15

    sdk_upgrade.update_or_upgrade_or_downgrade(
        config.PACKAGE_NAME,
        service_name,
        to_package_version=None,
        additional_options={
            "service": {"name": service_name},
            "cassandra": {"batch_size_warn_threshold_in_kb": threshold_kb},
        },
        expected_running_tasks=config.DEFAULT_TASK_COUNT,
        wait_for_deployment=True,
        timeout_seconds=config.DEFAULT_CASSANDRA_TIMEOUT,
    )

    for _ in range(3):
        # Restart the node and wait for the recovery plan to complete.
        sdk_cmd.svc_cli(config.PACKAGE_NAME, service_name, "pod restart node-0")
        sdk_plan.wait_for_kicked_off_recovery(service_name)
        sdk_plan.wait_for_completed_recovery(
            service_name, timeout_seconds=config.DEFAULT_CASSANDRA_TIMEOUT
        )

        # Dump the task environment and verify the threshold value directly follows the prefix.
        _, stdout, _ = sdk_cmd.service_task_exec(service_name, "node-0-server", "env")
        prefix = "CASSANDRA_BATCH_SIZE_WARN_THRESHOLD_IN_KB="
        pos = stdout.find(prefix)
        if pos < 0:
            raise Exception("Required envvar not found")
        value_start = pos + len(prefix)
        if not stdout[value_start:].startswith("{}".format(threshold_kb)):
            found = stdout[value_start : value_start + 15]
            log.error("Looking for %s%d but found: %s", prefix, threshold_kb, found)
            raise Exception("Envvar not set to required value")
def test_upgrade_from_xpack_enabled(
    package_name: str,
    service_name: str,
    options: Dict[str, Any],
    expected_task_count: int,
    from_version: str,
    to_version: str = "stub-universe",
) -> None:
    """Upgrade an ES5 cluster with X-Pack enabled to ES6, then apply `options`.

    Creates a document at each stage (ES5, ES6 with security disabled, ES6 post-update) and
    verifies all three remain readable at the end.

    Args:
        package_name: Name of the Elastic package.
        service_name: Name of the service instance.
        options: Final service options to apply after the index migration.
        expected_task_count: Task count to wait for at each deployment.
        from_version: ES5 package version to install first.
        to_version: Target package version for the upgrade.
    """
    # This test needs to run some code in between the Universe version installation and the upgrade
    # to the 'stub-universe' version, so it cannot use `sdk_upgrade.test_upgrade`.
    http_user = DEFAULT_ELASTICSEARCH_USER
    http_password = DEFAULT_ELASTICSEARCH_PASSWORD

    sdk_install.uninstall(package_name, service_name)

    sdk_install.install(
        package_name,
        service_name,
        expected_running_tasks=expected_task_count,
        additional_options={"elasticsearch": {"xpack_enabled": True}},
        package_version=from_version,
    )

    document_es_5_id = 1
    document_es_5_fields = {
        "name": "Elasticsearch 5: X-Pack enabled",
        "role": "search engine",
    }
    create_document(
        DEFAULT_INDEX_NAME,
        DEFAULT_INDEX_TYPE,
        document_es_5_id,
        document_es_5_fields,
        service_name=service_name,
        http_user=http_user,
        http_password=http_password,
    )

    # This is the first crucial step when upgrading from "X-Pack enabled" on ES5 to "X-Pack security
    # enabled" on ES6. The default "changeme" password doesn't work anymore on ES6, so passwords
    # *must* be *explicitly* set, otherwise nodes won't authenticate requests, leaving the cluster
    # unavailable. Users will have to do this manually when upgrading.
    _curl_query(
        service_name,
        "POST",
        "_xpack/security/user/{}/_password".format(http_user),
        json_body={"password": http_password},
        http_user=http_user,
        http_password=http_password,
    )

    # First we upgrade to "X-Pack security enabled" set to false on ES6, so that we can use the
    # X-Pack migration assistance and upgrade APIs.
    sdk_upgrade.update_or_upgrade_or_downgrade(
        package_name,
        service_name,
        to_version,
        {
            "service": {"update_strategy": "parallel"},
            "elasticsearch": {"xpack_security_enabled": False},
        },
        expected_task_count,
    )

    # Get list of indices to upgrade from here. The response looks something like:
    # {
    #   "indices" : {
    #     ".security" : {
    #       "action_required" : "upgrade"
    #     },
    #     ".watches" : {
    #       "action_required" : "upgrade"
    #     }
    #   }
    # }
    response = _curl_query(service_name, "GET", "_xpack/migration/assistance?pretty")

    # This is the second crucial step when upgrading from "X-Pack enabled" on ES5 to ES6. The
    # ".security" index (along with any others returned by the "assistance" API) needs to be
    # upgraded.
    for index in response["indices"]:
        _curl_query(
            service_name,
            "POST",
            "_xpack/migration/upgrade/{}?pretty".format(index),
            http_user=http_user,
            http_password=http_password,
        )

    document_es_6_security_disabled_id = 2
    document_es_6_security_disabled_fields = {
        "name": "Elasticsearch 6: X-Pack security disabled",
        "role": "search engine",
    }
    create_document(
        DEFAULT_INDEX_NAME,
        DEFAULT_INDEX_TYPE,
        document_es_6_security_disabled_id,
        document_es_6_security_disabled_fields,
        service_name=service_name,
        http_user=http_user,
        http_password=http_password,
    )

    # After upgrading the indices, we're now safe to do the actual configuration update, possibly
    # enabling X-Pack security.
    sdk_service.update_configuration(package_name, service_name, options, expected_task_count)

    document_es_6_post_update_id = 3
    document_es_6_post_update_fields = {
        "name": "Elasticsearch 6: Post update",
        "role": "search engine",
    }
    create_document(
        DEFAULT_INDEX_NAME,
        DEFAULT_INDEX_TYPE,
        document_es_6_post_update_id,
        document_es_6_post_update_fields,
        service_name=service_name,
        http_user=http_user,
        http_password=http_password,
    )

    # Make sure that documents were created and are accessible.
    verify_document(
        service_name,
        document_es_5_id,
        document_es_5_fields,
        http_user=http_user,
        http_password=http_password,
    )
    verify_document(
        service_name,
        document_es_6_security_disabled_id,
        document_es_6_security_disabled_fields,
        http_user=http_user,
        http_password=http_password,
    )
    verify_document(
        service_name,
        document_es_6_post_update_id,
        document_es_6_post_update_fields,
        http_user=http_user,
        http_password=http_password,
    )
def test_upgrade_from_xpack_enabled(
    package_name: str,
    service_name: str,
    options: Dict[str, Any],
    expected_task_count: int,
    from_version: str,
    to_version: str = "stub-universe",
) -> None:
    """Install ES5 with X-Pack enabled, upgrade to ES6, migrate indices, then apply `options`.

    A document is written at every stage of the migration and all of them are verified to be
    readable once the final configuration is in place.

    Args:
        package_name: Name of the Elastic package.
        service_name: Name of the service instance.
        options: Service options applied after index migration (may enable X-Pack security).
        expected_task_count: Task count to wait for at each deployment.
        from_version: ES5 package version to install first.
        to_version: Target package version for the upgrade.
    """
    # This test needs to run some code in between the Universe version installation and the upgrade
    # to the 'stub-universe' version, so it cannot use `sdk_upgrade.test_upgrade`.
    http_user = DEFAULT_ELASTICSEARCH_USER
    http_password = DEFAULT_ELASTICSEARCH_PASSWORD

    sdk_install.uninstall(package_name, service_name)

    sdk_install.install(
        package_name,
        service_name,
        expected_running_tasks=expected_task_count,
        additional_options={"elasticsearch": {"xpack_enabled": True}},
        package_version=from_version,
    )

    document_es_5_id = 1
    document_es_5_fields = {"name": "Elasticsearch 5: X-Pack enabled", "role": "search engine"}
    create_document(
        DEFAULT_INDEX_NAME,
        DEFAULT_INDEX_TYPE,
        document_es_5_id,
        document_es_5_fields,
        service_name=service_name,
        http_user=http_user,
        http_password=http_password,
    )

    # This is the first crucial step when upgrading from "X-Pack enabled" on ES5 to "X-Pack security
    # enabled" on ES6. The default "changeme" password doesn't work anymore on ES6, so passwords
    # *must* be *explicitly* set, otherwise nodes won't authenticate requests, leaving the cluster
    # unavailable. Users will have to do this manually when upgrading.
    _curl_query(
        service_name,
        "POST",
        "_xpack/security/user/{}/_password".format(http_user),
        json_body={"password": http_password},
        http_user=http_user,
        http_password=http_password,
    )

    # First we upgrade to "X-Pack security enabled" set to false on ES6, so that we can use the
    # X-Pack migration assistance and upgrade APIs.
    sdk_upgrade.update_or_upgrade_or_downgrade(
        package_name,
        service_name,
        to_version,
        {
            "service": {"update_strategy": "parallel"},
            "elasticsearch": {"xpack_security_enabled": False},
        },
        expected_task_count,
    )

    # Get list of indices to upgrade from here. The response looks something like:
    # {
    #   "indices" : {
    #     ".security" : {
    #       "action_required" : "upgrade"
    #     },
    #     ".watches" : {
    #       "action_required" : "upgrade"
    #     }
    #   }
    # }
    response = _curl_query(service_name, "GET", "_xpack/migration/assistance?pretty")

    # This is the second crucial step when upgrading from "X-Pack enabled" on ES5 to ES6. The
    # ".security" index (along with any others returned by the "assistance" API) needs to be
    # upgraded.
    for index in response["indices"]:
        _curl_query(
            service_name,
            "POST",
            "_xpack/migration/upgrade/{}?pretty".format(index),
            http_user=http_user,
            http_password=http_password,
        )

    document_es_6_security_disabled_id = 2
    document_es_6_security_disabled_fields = {
        "name": "Elasticsearch 6: X-Pack security disabled",
        "role": "search engine",
    }
    create_document(
        DEFAULT_INDEX_NAME,
        DEFAULT_INDEX_TYPE,
        document_es_6_security_disabled_id,
        document_es_6_security_disabled_fields,
        service_name=service_name,
        http_user=http_user,
        http_password=http_password,
    )

    # After upgrading the indices, we're now safe to do the actual configuration update, possibly
    # enabling X-Pack security.
    sdk_service.update_configuration(package_name, service_name, options, expected_task_count)

    document_es_6_post_update_id = 3
    document_es_6_post_update_fields = {
        "name": "Elasticsearch 6: Post update",
        "role": "search engine",
    }
    create_document(
        DEFAULT_INDEX_NAME,
        DEFAULT_INDEX_TYPE,
        document_es_6_post_update_id,
        document_es_6_post_update_fields,
        service_name=service_name,
        http_user=http_user,
        http_password=http_password,
    )

    # Make sure that documents were created and are accessible.
    verify_document(
        service_name,
        document_es_5_id,
        document_es_5_fields,
        http_user=http_user,
        http_password=http_password,
    )
    verify_document(
        service_name,
        document_es_6_security_disabled_id,
        document_es_6_security_disabled_fields,
        http_user=http_user,
        http_password=http_password,
    )
    verify_document(
        service_name,
        document_es_6_post_update_id,
        document_es_6_post_update_fields,
        http_user=http_user,
        http_password=http_password,
    )
def test_upgrade_from_xpack_enabled_to_xpack_security_enabled():
    """Upgrade the foldered Elastic service from ES5 "X-Pack enabled" to ES6 with X-Pack
    security enabled, creating and verifying a document at each migration stage.

    NOTE(review): relies on the module-level `foldered_name` and the
    `current_expected_task_count` global checked by the `pre_test_setup` fixture — confirm
    both are defined in this module.
    """
    # Since this test uninstalls the Elastic service that is shared between all previous tests,
    # reset the number of expected tasks to the default value. This is checked before all tests
    # by the `pre_test_setup` fixture.
    global current_expected_task_count
    current_expected_task_count = config.DEFAULT_TASK_COUNT

    # This test needs to run some code in between the Universe version installation and the stub Universe
    # upgrade, so it cannot use `sdk_upgrade.test_upgrade`.
    log.info("Updating from X-Pack 'enabled' to X-Pack security 'enabled'")
    http_user = config.DEFAULT_ELASTICSEARCH_USER
    http_password = config.DEFAULT_ELASTICSEARCH_PASSWORD
    package_name = config.PACKAGE_NAME

    sdk_install.uninstall(package_name, foldered_name)

    # Move Universe repo to the top of the repo list so that we can first install the Universe
    # version.
    _, universe_version = sdk_repository.move_universe_repo(package_name, universe_repo_index=0)

    # Install the released ES5 version with X-Pack enabled.
    sdk_install.install(
        package_name,
        foldered_name,
        expected_running_tasks=current_expected_task_count,
        additional_options={"elasticsearch": {"xpack_enabled": True}},
        package_version=universe_version,
    )

    # Stage 1 document: written on ES5 before any upgrade.
    document_es_5_id = 1
    document_es_5_fields = {"name": "Elasticsearch 5: X-Pack enabled", "role": "search engine"}
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        document_es_5_id,
        document_es_5_fields,
        service_name=foldered_name,
        http_user=http_user,
        http_password=http_password,
    )

    # This is the first crucial step when upgrading from "X-Pack enabled" on ES5 to "X-Pack security
    # enabled" on ES6. The default "changeme" password doesn't work anymore on ES6, so passwords
    # *must* be *explicitly* set, otherwise nodes won't authenticate requests, leaving the cluster
    # unavailable. Users will have to do this manually when upgrading.
    config._curl_query(
        foldered_name,
        "POST",
        "_xpack/security/user/{}/_password".format(http_user),
        json_body={"password": http_password},
        http_user=http_user,
        http_password=http_password,
    )

    # Move Universe repo back to the bottom of the repo list so that we can upgrade to the version
    # under test.
    _, test_version = sdk_repository.move_universe_repo(package_name)

    # First we upgrade to "X-Pack security enabled" set to false on ES6, so that we can use the
    # X-Pack migration assistance and upgrade APIs.
    sdk_upgrade.update_or_upgrade_or_downgrade(
        package_name,
        foldered_name,
        test_version,
        {
            "service": {"update_strategy": "parallel"},
            "elasticsearch": {"xpack_security_enabled": False},
        },
        current_expected_task_count,
    )

    # Get list of indices to upgrade from here. The response looks something like:
    # {
    #   "indices" : {
    #     ".security" : {
    #       "action_required" : "upgrade"
    #     },
    #     ".watches" : {
    #       "action_required" : "upgrade"
    #     }
    #   }
    # }
    response = config._curl_query(foldered_name, "GET", "_xpack/migration/assistance?pretty")

    # This is the second crucial step when upgrading from "X-Pack enabled" on ES5 to "X-Pack
    # security enabled" on ES6. The ".security" index (along with any others returned by the
    # "assistance" API) needs to be upgraded.
    for index in response["indices"]:
        config._curl_query(
            foldered_name,
            "POST",
            "_xpack/migration/upgrade/{}?pretty".format(index),
            http_user=http_user,
            http_password=http_password,
        )

    # Stage 2 document: written on ES6 while X-Pack security is still disabled.
    document_es_6_security_disabled_id = 2
    document_es_6_security_disabled_fields = {
        "name": "Elasticsearch 6: X-Pack security disabled",
        "role": "search engine",
    }
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        document_es_6_security_disabled_id,
        document_es_6_security_disabled_fields,
        service_name=foldered_name,
        http_user=http_user,
        http_password=http_password,
    )

    # After upgrading the indices, we're now safe to enable X-Pack security.
    sdk_service.update_configuration(
        package_name,
        foldered_name,
        {"elasticsearch": {"xpack_security_enabled": True}},
        current_expected_task_count,
    )

    # Stage 3 document: written on ES6 after X-Pack security has been enabled.
    document_es_6_security_enabled_id = 3
    document_es_6_security_enabled_fields = {
        "name": "Elasticsearch 6: X-Pack security enabled",
        "role": "search engine",
    }
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        document_es_6_security_enabled_id,
        document_es_6_security_enabled_fields,
        service_name=foldered_name,
        http_user=http_user,
        http_password=http_password,
    )

    # Make sure that documents were created and are accessible.
    config.verify_document(
        foldered_name,
        document_es_5_id,
        document_es_5_fields,
        http_user=http_user,
        http_password=http_password,
    )
    config.verify_document(
        foldered_name,
        document_es_6_security_disabled_id,
        document_es_6_security_disabled_fields,
        http_user=http_user,
        http_password=http_password,
    )
    config.verify_document(
        foldered_name,
        document_es_6_security_enabled_id,
        document_es_6_security_enabled_fields,
        http_user=http_user,
        http_password=http_password,
    )