def install(
        package_name,
        running_task_count,
        service_name=None,
        additional_options=None,
        package_version=None,
        check_suppression=True):
    """Install a Universe package and wait until it is deployed and healthy.

    Args:
        package_name: Universe package to install.
        running_task_count: number of tasks expected once deployed.
        service_name: marathon app name; defaults to package_name.
        additional_options: extra options merged over the package defaults
            (None means "no extra options"; avoids a shared mutable default).
        package_version: specific package version, or None for latest.
        check_suppression: also wait for the framework to suppress offers.
    """
    if not service_name:
        service_name = package_name
    start = time.time()

    # `or {}` keeps the old `additional_options={}` behavior without the
    # mutable-default-argument pitfall.
    merged_options = get_package_options(additional_options or {})

    sdk_utils.out('Installing {} with options={} version={}'.format(
        package_name, merged_options, package_version))

    # install_package_and_wait silently waits for all marathon deployments to
    # clear. To give some visibility, install in the following order:

    # 1. install package
    shakedown.install_package(
        package_name,
        package_version=package_version,
        options_json=merged_options)

    # 2. wait for expected tasks to come up
    sdk_utils.out("Waiting for expected tasks to come up...")
    sdk_tasks.check_running(service_name, running_task_count)
    sdk_plan.wait_for_completed_deployment(service_name)

    # 3. check service health
    marathon_client = dcos.marathon.create_client()

    def is_deployment_finished():
        # TODO(nickbp): upstream fix to shakedown, which currently checks for
        # ANY deployments rather than the one we care about
        deploying_apps = set()
        sdk_utils.out("Getting deployments")
        deployments = marathon_client.get_deployments()
        sdk_utils.out("Found {} deployments".format(len(deployments)))
        for deployment in deployments:
            sdk_utils.out("Deployment: {}".format(deployment))
            for app in deployment.get('affectedApps', []):
                sdk_utils.out("Adding {}".format(app))
                deploying_apps.add(app)
        sdk_utils.out('Checking that deployment of {} has ended:\n- Deploying apps: {}'.format(service_name, deploying_apps))
        return not '/{}'.format(service_name) in deploying_apps

    sdk_utils.out("Waiting for marathon deployment to finish...")
    sdk_spin.time_wait_noisy(is_deployment_finished)

    # 4. Ensure the framework is suppressed.
    #
    # This is only configurable in order to support installs from Universe
    # during the upgrade_downgrade tests, because currently the suppression
    # endpoint isn't supported by all frameworks in Universe. It can be
    # removed once all frameworks rely on dcos-commons >= 0.13.
    if check_suppression:
        sdk_utils.out("Waiting for framework to be suppressed...")
        sdk_spin.time_wait_noisy(
            lambda: sdk_api.is_suppressed(service_name))

    sdk_utils.out('Install done after {}'.format(sdk_spin.pretty_time(time.time() - start)))
def test_marathon_rack_not_found():
    """Install with an unsatisfiable placement constraint and verify the plan
    stalls on the first node (no matching agent ever appears)."""

    def get_plan():
        # The plan endpoint may not be up yet; treat CLI failures as "retry".
        try:
            return service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
        except Exception:  # was a bare `except:`; don't swallow SystemExit etc.
            return False

    shakedown.install_package(
        PACKAGE_NAME,
        service_name=SERVICE_NAME,
        options_json=install.get_package_options(
            additional_options={'service': {'placement_constraint': 'rack_id:LIKE:rack-foo-.*'}}
        ),
        wait_for_completion=False)
    try:
        tasks.check_running(PACKAGE_NAME, 1, timeout_seconds=120)
        assert False, "Should have failed to install"
    except AssertionError as arg:
        raise arg
    except Exception:  # was a bare `except:`
        pass  # expected to fail

    pl = spin.time_wait_return(get_plan)

    # check that first node is still (unsuccessfully) looking for a match:
    assert pl['status'] == 'IN_PROGRESS'
    assert pl['phases'][0]['status'] == 'IN_PROGRESS'
    # if so early, it can be PREPARED ?
    assert pl['phases'][0]['steps'][0]['status'] in ('PREPARED', 'PENDING')
    assert pl['phases'][0]['steps'][1]['status'] == 'PENDING'
    assert pl['phases'][0]['steps'][2]['status'] == 'PENDING'

    install.uninstall(SERVICE_NAME, PACKAGE_NAME)
def test_install_marathon():
    """Install the Marathon package for DC/OS."""
    # Install
    shakedown.install_package_and_wait(PACKAGE_NAME)
    assert shakedown.package_installed(PACKAGE_NAME), 'Package failed to install'

    # Poll until the service registers and reports healthy, or we time out.
    end_time = time.time() + WAIT_TIME_IN_SECS
    found = False
    while time.time() < end_time:
        found = shakedown.get_service(PACKAGE_NAME) is not None
        if found and shakedown.service_healthy(SERVICE_NAME):
            break
        time.sleep(1)

    assert found, 'Service did not register with DCOS'
    shakedown.deployment_wait()

    # Uninstall
    uninstall('marathon-user')
    shakedown.deployment_wait()

    # Reinstall
    shakedown.install_package_and_wait(PACKAGE_NAME)
    assert shakedown.package_installed(PACKAGE_NAME), 'Package failed to reinstall'

    # Installing on top of an existing install must fail with a non-zero exit.
    try:
        shakedown.install_package(PACKAGE_NAME)
    except Exception:  # the bound name `e` was unused
        pass
    else:
        # Exception is not raised -> exit code was 0
        assert False, "Error: CLI returns 0 when asked to install Marathon"
def setup_history_server(hdfs_with_kerberos, setup_hdfs_client, configure_universe):
    """Fixture: deploy a kerberized Spark history server backed by HDFS,
    yield to the tests, then destroy the marathon app."""
    try:
        sdk_auth.kinit(HDFS_CLIENT_ID, keytab="hdfs.keytab",
                       principal=GENERIC_HDFS_USER_PRINCIPAL)
        hdfs_cmd("mkdir /history")
        hdfs_cmd("chmod 1777 /history")

        # Build the package options up front for readability.
        history_options = {
            "service": {
                "user": SPARK_HISTORY_USER,
                "hdfs-config-url": "http://api.{}.marathon.l4lb.thisdcos.directory/v1/endpoints"
                .format(HDFS_SERVICE_NAME)
            },
            "security": {
                "kerberos": {
                    "enabled": True,
                    "krb5conf": utils.HDFS_KRB5_CONF,
                    "principal": GENERIC_HDFS_USER_PRINCIPAL,
                    "keytab": KEYTAB_SECRET_PATH
                }
            }
        }
        shakedown.install_package(
            package_name=utils.HISTORY_PACKAGE_NAME,
            options_json=history_options,
            wait_for_completion=True)  # wait for it to become healthy
        yield
    finally:
        sdk_marathon.destroy_app(utils.HISTORY_SERVICE_NAME)
def test_marathon_rack_not_found():
    """Install with a placement constraint no agent can satisfy and verify
    the deploy plan remains stuck on the first node."""

    def fetch_plan():
        # Plan endpoint may not be queryable yet; any CLI failure means retry.
        try:
            return service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
        except Exception:  # was a bare `except:`; keep SystemExit/KeyboardInterrupt alive
            return False

    shakedown.install_package(
        PACKAGE_NAME,
        service_name=SERVICE_NAME,
        options_json=install.get_package_options(additional_options={
            'service': {
                'placement_constraint': 'rack_id:LIKE:rack-foo-.*'
            }
        }),
        wait_for_completion=False)
    try:
        tasks.check_running(PACKAGE_NAME, 1, timeout_seconds=120)
        assert False, "Should have failed to install"
    except AssertionError as arg:
        raise arg
    except Exception:  # was a bare `except:`
        pass  # expected to fail

    pl = spin.time_wait_return(fetch_plan)

    # check that first node is still (unsuccessfully) looking for a match:
    assert pl['status'] == 'IN_PROGRESS'
    assert pl['phases'][0]['status'] == 'IN_PROGRESS'
    # if so early, it can be PREPARED ?
    assert pl['phases'][0]['steps'][0]['status'] in ('PREPARED', 'PENDING')
    assert pl['phases'][0]['steps'][1]['status'] == 'PENDING'
    assert pl['phases'][0]['steps'][2]['status'] == 'PENDING'

    install.uninstall(SERVICE_NAME, PACKAGE_NAME)
def _install_spark():
    """Install the Spark package and block until its web UI answers 200."""
    options = {
        "hdfs": {
            "config-url": "http://api.hdfs.marathon.l4lb.thisdcos.directory/v1/endpoints"
        }
    }

    # Strict-mode clusters additionally need a service account + secret.
    if _is_strict():
        options['service'] = {"user": "******", "principal": "service-acct"}
        options['security'] = {
            "mesos": {
                "authentication": {
                    "secret_name": "secret"
                }
            }
        }

    shakedown.install_package('spark', options_json=options, wait_for_completion=True)

    def dashboard_is_up():
        base_url = dcos.config.get_config_val("core.dcos_url")
        dashboard_url = urllib.parse.urljoin(base_url, "/service/spark")
        return dcos.http.get(dashboard_url).status_code == 200

    shakedown.spinner.wait_for(dashboard_is_up)
def install(package_name, running_task_count, service_name=None,
            additional_options=None, package_version=None):
    """Install a package, wait for its tasks, then wait for the marathon
    deployment of the service to clear.

    Args:
        package_name: Universe package to install.
        running_task_count: number of tasks expected once deployed.
        service_name: marathon app name; defaults to package_name.
        additional_options: extra options merged over package defaults
            (None means none; avoids a shared mutable default).
        package_version: specific package version, or None for latest.
    """
    if not service_name:
        service_name = package_name
    start = time.time()

    # `or {}` preserves the old `additional_options={}` behavior without the
    # mutable-default-argument pitfall.
    merged_options = get_package_options(additional_options or {})
    print('Installing {} with options={} version={}'.format(package_name, merged_options, package_version))

    # install_package_and_wait silently waits for all marathon deployments to
    # clear. To give some visibility, install in the following order:

    # 1. install package
    shakedown.install_package(package_name, package_version=package_version, options_json=merged_options)

    # 2. wait for expected tasks to come up
    print("Waiting for expected tasks to come up...")
    sdk_tasks.check_running(service_name, running_task_count)

    # 3. check service health
    marathon_client = dcos.marathon.create_client()

    def fn():
        # TODO(nickbp): upstream fix to shakedown, which currently checks for
        # ANY deployments rather than the one we care about
        deploying_apps = set()
        print("Getting deployments")
        deployments = marathon_client.get_deployments()
        print("Found {} deployments".format(len(deployments)))
        for d in deployments:
            print("Deployment: {}".format(d))
            for a in d.get('affectedApps', []):
                print("Adding {}".format(a))
                deploying_apps.add(a)
        print('Checking deployment of {} has ended:\n- Deploying apps: {}'.format(service_name, deploying_apps))
        return not '/{}'.format(service_name) in deploying_apps

    # pass fn directly: wrapping it in `lambda: fn()` added nothing
    sdk_spin.time_wait_noisy(fn, timeout_seconds=30)

    print('Install done after {}'.format(sdk_spin.pretty_time(time.time() - start)))
def test_history_kdc_config(hdfs_with_kerberos):
    """Deploy the history server with explicit KDC settings; the deployment
    itself is the assertion (it fails if kerberos is misconfigured)."""
    history_service_with_kdc_config = "spark-history-with-kdc-config"

    kdc_options = {
        "service": {
            "name": history_service_with_kdc_config,
            "user": SPARK_HISTORY_USER,
            "hdfs-config-url": "http://api.{}.marathon.l4lb.thisdcos.directory/v1/endpoints"
            .format(HDFS_SERVICE_NAME)
        },
        "security": {
            "kerberos": {
                "enabled": True,
                "kdc": {
                    "hostname": hdfs_with_kerberos.get_host(),
                    "port": int(hdfs_with_kerberos.get_port())
                },
                "realm": sdk_auth.REALM,
                "principal": GENERIC_HDFS_USER_PRINCIPAL,
                "keytab": KEYTAB_SECRET_PATH
            }
        }
    }

    try:
        # This deployment will fail if kerberos is not configured properly.
        shakedown.install_package(
            package_name=utils.HISTORY_PACKAGE_NAME,
            options_json=kdc_options,
            wait_for_completion=True,  # wait for it to become healthy
            timeout_sec=240)
    finally:
        sdk_marathon.destroy_app(history_service_with_kdc_config)
def setup_module(module):
    """Module setup: wipe any prior install, then kick off an install with a
    custom task spec without waiting for it to finish."""
    install.uninstall(PACKAGE_NAME)

    taskcfg_options = install.get_package_options(
        {"service": {"spec_file": "examples/taskcfg.yml"}})

    # don't wait for install to complete successfully:
    shakedown.install_package(PACKAGE_NAME, options_json=taskcfg_options)
def deploy_dispatchers(num_dispatchers, service_name_base, output_file, options,
                       options_file=None, package_repo=None,
                       quota_dispatcher_cpus=1, quota_dispatcher_gpus=0,
                       quota_dispatcher_mem=2048.0, quota_driver_cpus=1,
                       quota_driver_gpus=0, quota_driver_mem=1024.0):
    """Deploy `num_dispatchers` Spark dispatchers, creating dispatcher/driver
    role quotas for each, and append a `service,dispatcher_role,driver_role`
    CSV row per dispatcher to `output_file`.

    NOTE(review): the installed package name comes from the module-level
    `arguments['--package-name']`, not from a parameter — confirm intended.
    """
    with open(output_file, "w") as outfile:
        shakedown.run_dcos_command("package install spark --cli --yes")
        for index in range(num_dispatchers):
            service_name = "{}-{}".format(service_name_base, str(index))

            # set service name
            options["service"]["name"] = service_name

            # register the extra package repo once, if not already present
            if package_repo is not None:
                known_repo_uris = [
                    repo['uri']
                    for repo in shakedown.get_package_repos()['repositories']
                ]
                if package_repo not in known_repo_uris:
                    shakedown.add_package_repo(
                        repo_name="{}-repo".format(service_name_base),
                        repo_url=package_repo)

            # create dispatcher & driver role quotas
            dispatcher_role = "{}-dispatcher-role".format(service_name)
            create_quota(name=dispatcher_role,
                         cpus=quota_dispatcher_cpus,
                         gpus=quota_dispatcher_gpus,
                         mem=quota_dispatcher_mem)
            driver_role = "{}-driver-role".format(service_name)
            create_quota(name=driver_role,
                         cpus=quota_driver_cpus,
                         gpus=quota_driver_gpus,
                         mem=quota_driver_mem)

            # install dispatcher with appropriate role
            options["service"]["role"] = dispatcher_role

            if options_file is not None:
                shakedown.install_package(
                    package_name=arguments['--package-name'],
                    service_name=service_name,
                    options_file=options_file)
            else:
                shakedown.install_package(
                    package_name=arguments['--package-name'],
                    service_name=service_name,
                    options_json=options)

            outfile.write("{},{},{}\n".format(
                service_name, dispatcher_role, driver_role))
def setup_module(module):
    """Module setup: clean slate, then install with the serial-canary deploy
    strategy and wait for completion."""
    install.uninstall(SERVICE_NAME, PACKAGE_NAME)
    utils.gc_frameworks()

    canary_options = install.get_package_options(
        additional_options=DEPLOY_STRATEGY_SERIAL_CANARY)
    shakedown.install_package(PACKAGE_NAME,
                              service_name=SERVICE_NAME,
                              options_json=canary_options,
                              wait_for_completion=True)
def test_custom_service_name():
    """ Install MoM with a custom service name. """
    cosmos_pm = packagemanager.PackageManager(cosmos.get_cosmos_url())
    # The return value was bound to an unused `pkg` local; keep the call
    # (it validates the package exists against cosmos) but drop the binding.
    cosmos_pm.get_package_version('marathon', None)
    options = {'service': {'name': "test-marathon"}}
    shakedown.install_package('marathon', options_json=options)
    shakedown.deployment_wait()

    assert shakedown.wait_for_service_endpoint('test-marathon')
def configure_package(configure_security):
    """Fixture: fresh install of the package with a custom task spec (install
    is NOT awaited), then uninstall when the session ends."""
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        taskcfg_options = sdk_install.get_package_options(
            {"service": {"spec_file": "examples/taskcfg.yml"}})
        # don't wait for install to complete successfully:
        shakedown.install_package(config.PACKAGE_NAME, options_json=taskcfg_options)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def retried_shakedown_install(package_name, service_name, package_version,
                              merged_options, timeout_seconds,
                              expected_running_tasks):
    """Thin wrapper over shakedown.install_package that always waits for the
    install to complete (intended to be retried by the caller)."""
    shakedown.install_package(
        package_name,
        package_version=package_version,
        service_name=service_name,
        options_json=merged_options,
        wait_for_completion=True,
        timeout_sec=timeout_seconds,
        expected_running_tasks=expected_running_tasks)
def test_xpack_toggle_with_kibana(default_populated_index):
    # End-to-end toggle test: verify X-Pack starts disabled, exercise Kibana
    # in both modes, and confirm a document written while X-Pack is enabled
    # is still readable after disabling it. Step order matters throughout.

    log.info("\n***** Verify X-Pack disabled by default in elasticsearch")
    config.verify_commercial_api_status(
        False, service_name=FOLDERED_SERVICE_NAME)

    log.info("\n***** Test kibana with X-Pack disabled...")
    shakedown.install_package(config.KIBANA_PACKAGE_NAME, options_json={
        "kibana": {
            "elasticsearch_url": "http://" + sdk_hosts.vip_host(FOLDERED_SERVICE_NAME, "coordinator", 9200)
        }})
    shakedown.deployment_wait(
        app_id="/{}".format(config.KIBANA_PACKAGE_NAME),
        timeout=config.DEFAULT_KIBANA_TIMEOUT)
    config.check_kibana_adminrouter_integration(
        "service/{}/".format(config.KIBANA_PACKAGE_NAME))
    log.info("Uninstall kibana with X-Pack disabled")
    sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)

    log.info("\n***** Set/verify X-Pack enabled in elasticsearch")
    config.enable_xpack(service_name=FOLDERED_SERVICE_NAME)
    config.verify_commercial_api_status(
        True, service_name=FOLDERED_SERVICE_NAME)
    config.verify_xpack_license(service_name=FOLDERED_SERVICE_NAME)

    log.info("\n***** Write some data while enabled, disable X-Pack, and verify we can still read what we wrote.")
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        2,
        {"name": "X-Pack", "role": "commercial plugin"},
        service_name=FOLDERED_SERVICE_NAME)

    log.info("\n***** Test kibana with X-Pack enabled...")
    shakedown.install_package(config.KIBANA_PACKAGE_NAME, options_json={
        "kibana": {
            "elasticsearch_url": "http://" + sdk_hosts.vip_host(FOLDERED_SERVICE_NAME, "coordinator", 9200),
            "xpack_enabled": True
        }})
    log.info("\n***** Installing Kibana w/X-Pack can take as much as 15 minutes for Marathon deployment ")
    log.info("to complete due to a configured HTTP health check. (typical: 12 minutes)")
    shakedown.deployment_wait(
        app_id="/{}".format(config.KIBANA_PACKAGE_NAME),
        timeout=config.DEFAULT_KIBANA_TIMEOUT)
    # With X-Pack enabled the login page is the integration check target.
    config.check_kibana_adminrouter_integration(
        "service/{}/login".format(config.KIBANA_PACKAGE_NAME))
    log.info("\n***** Uninstall kibana with X-Pack enabled")
    sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)

    log.info("\n***** Disable X-Pack in elasticsearch.")
    config.disable_xpack(service_name=FOLDERED_SERVICE_NAME)
    log.info("\n***** Verify we can still read what we wrote when X-Pack was enabled.")
    config.verify_commercial_api_status(
        False, service_name=FOLDERED_SERVICE_NAME)
    doc = config.get_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        2,
        service_name=FOLDERED_SERVICE_NAME)
    assert doc["_source"]["name"] == "X-Pack"
def install(package_version=None, package_name=PACKAGE_NAME, additional_options=None,
            wait_for_completion=True):
    """Install `package_name` with options merged over DEFAULT_OPTIONS_DICT.

    Args:
        package_version: specific version, or None for latest.
        package_name: package to install (defaults to PACKAGE_NAME).
        additional_options: options merged over the defaults (None = none;
            avoids a shared mutable default).
        wait_for_completion: block until the install finishes.
    """
    merged_options = _nested_dict_merge(DEFAULT_OPTIONS_DICT, additional_options or {})
    print('Installing {} with options: {}'.format(package_name, merged_options))
    # Bug fix: PACKAGE_NAME was previously hard-coded below, so the
    # package_name argument was silently ignored.
    shakedown.install_package(package_name=package_name,
                              package_version=package_version,
                              options_json=merged_options,
                              wait_for_completion=wait_for_completion)
def test_custom_service_name():
    """ Install MoM with a custom service name. """
    cosmos = packagemanager.PackageManager(get_cosmos_url())
    # The result was bound to an unused `pkg` local; keep the call (it
    # validates the package against cosmos) but drop the dead assignment.
    cosmos.get_package_version('marathon', None)
    options = {
        'service': {'name': "test-marathon"}
    }
    shakedown.install_package('marathon', options_json=options)
    shakedown.deployment_wait()

    assert shakedown.wait_for_service_endpoint('test-marathon')
def _require_package(pkg_name, options=None):
    """Install `pkg_name` with `options` unless it is already installed.

    `options=None` replaces the old mutable `{}` default; behavior for
    callers passing a dict is unchanged.
    """
    pkg_manager = dcos.package.get_package_manager()
    installed_pkgs = dcos.package.installed_packages(pkg_manager, None, None, False)

    if any(pkg['name'] == pkg_name for pkg in installed_pkgs):
        LOGGER.info("Package {} already installed.".format(pkg_name))
    else:
        LOGGER.info("Installing package {}".format(pkg_name))
        shakedown.install_package(pkg_name,
                                  options_json=options or {},
                                  wait_for_completion=True)
def configure_package(configure_security):
    """Fixture: reinstall the package with a custom task spec without waiting
    for the deploy, then uninstall after the session."""
    try:
        sdk_install.uninstall(config.PACKAGE_NAME)
        spec_options = sdk_install.get_package_options(
            {"service": {"spec_file": "examples/taskcfg.yml"}})
        # don't wait for install to complete successfully:
        shakedown.install_package(config.PACKAGE_NAME, options_json=spec_options)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME)
def test_custom_service_name():
    """ Install MoM with a custom service name. """
    cosmos_pm = packagemanager.PackageManager(cosmos.get_cosmos_url())
    cosmos_pm.get_package_version('marathon', None)

    custom_name = "test-marathon"
    options = {'service': {'name': custom_name}}
    shakedown.install_package('marathon', options_json=options)
    common.deployment_wait(service_id=custom_name, max_attempts=300)
    common.wait_for_service_endpoint('test-marathon', timeout_sec=300, path="ping")
def test_xpack_toggle_with_kibana(default_populated_index):
    # Foldered-service variant of the X-Pack toggle test: exercise Kibana
    # with X-Pack off and on, and verify data written while enabled survives
    # disabling. Step order matters throughout.
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)

    log.info("\n***** Verify X-Pack disabled by default in elasticsearch")
    config.verify_commercial_api_status(False, service_name=foldered_name)

    log.info("\n***** Test kibana with X-Pack disabled...")
    shakedown.install_package(config.KIBANA_PACKAGE_NAME, options_json={
        "kibana": {"elasticsearch_url": "http://" + sdk_hosts.vip_host(foldered_name, "coordinator", 9200)}})
    shakedown.deployment_wait(
        app_id="/{}".format(config.KIBANA_PACKAGE_NAME),
        timeout=config.DEFAULT_KIBANA_TIMEOUT)
    config.check_kibana_adminrouter_integration(
        "service/{}/".format(config.KIBANA_PACKAGE_NAME))
    log.info("Uninstall kibana with X-Pack disabled")
    sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)

    log.info("\n***** Set/verify X-Pack enabled in elasticsearch. Requires parallel upgrade strategy for full restart.")
    config.enable_xpack(service_name=foldered_name)
    config.verify_commercial_api_status(True, service_name=foldered_name)
    config.verify_xpack_license(service_name=foldered_name)

    log.info("\n***** Write some data while enabled, disable X-Pack, and verify we can still read what we wrote.")
    config.create_document(
        config.DEFAULT_INDEX_NAME,
        config.DEFAULT_INDEX_TYPE,
        2,
        {"name": "X-Pack", "role": "commercial plugin"},
        service_name=foldered_name)

    log.info("\n***** Test kibana with X-Pack enabled...")
    shakedown.install_package(config.KIBANA_PACKAGE_NAME, options_json={
        "kibana": {
            "elasticsearch_url": "http://" + sdk_hosts.vip_host(foldered_name, "coordinator", 9200),
            "xpack_enabled": True
        }})
    log.info("\n***** Installing Kibana w/X-Pack can take as much as 15 minutes for Marathon deployment ")
    log.info("to complete due to a configured HTTP health check. (typical: 12 minutes)")
    shakedown.deployment_wait(app_id="/{}".format(config.KIBANA_PACKAGE_NAME),
                              timeout=config.DEFAULT_KIBANA_TIMEOUT)
    # With X-Pack enabled the login page is the integration check target.
    config.check_kibana_adminrouter_integration("service/{}/login".format(config.KIBANA_PACKAGE_NAME))
    log.info("\n***** Uninstall kibana with X-Pack enabled")
    sdk_install.uninstall(config.KIBANA_PACKAGE_NAME, config.KIBANA_PACKAGE_NAME)

    log.info("\n***** Disable X-Pack in elasticsearch.")
    config.disable_xpack(service_name=foldered_name)
    log.info("\n***** Verify we can still read what we wrote when X-Pack was enabled.")
    config.verify_commercial_api_status(False, service_name=foldered_name)
    doc = config.get_document(config.DEFAULT_INDEX_NAME,
                              config.DEFAULT_INDEX_TYPE,
                              2,
                              service_name=foldered_name)
    assert doc["_source"]["name"] == "X-Pack"

    # reset upgrade strategy to serial
    # NOTE(review): `current_expected_task_count` is a module-level name not
    # visible in this chunk — confirm it is defined at module scope.
    config.update_app(foldered_name, {'UPDATE_STRATEGY': 'serial'}, current_expected_task_count)
def retried_shakedown_install(
        package_name,
        service_name,
        package_version,
        merged_options,
        timeout_seconds,
        expected_running_tasks):
    """Delegate to shakedown.install_package, blocking until the expected
    number of tasks is running (meant to be wrapped in retry logic)."""
    install_kwargs = dict(
        package_version=package_version,
        service_name=service_name,
        options_json=merged_options,
        wait_for_completion=True,
        timeout_sec=timeout_seconds,
        expected_running_tasks=expected_running_tasks)
    shakedown.install_package(package_name, **install_kwargs)
def _require_package(pkg_name, service_name=None, options=None):
    """Install `pkg_name` unless it (or the given `service_name` app of it)
    is already installed.

    `options=None` replaces the old mutable `{}` default; behavior for
    callers passing a dict is unchanged.
    """
    if options is None:
        options = {}
    pkg_manager = dcos.package.get_package_manager()
    installed_pkgs = dcos.package.installed_packages(pkg_manager, None, None, False)

    pkg = next((pkg for pkg in installed_pkgs if pkg['name'] == pkg_name), None)
    if (pkg is not None) and (service_name is None):
        LOGGER.info("Package {} is already installed.".format(pkg_name))
    elif (pkg is not None) and (service_name in pkg['apps']):
        LOGGER.info("Package {} with app_id={} is already installed.".format(
            pkg_name, service_name))
    else:
        LOGGER.info("Installing package {}".format(pkg_name))
        shakedown.install_package(pkg_name,
                                  options_json=options,
                                  wait_for_completion=True)
def test_neo4j_universe_package_install(neo_package):
    """Neo4j used to be one of the universe packages tested above, largely
    because a short-lived marathon bug was surfaced through neo4j. However
    neo4j is so strongly different that we can't test it like the other
    services: it is NOT a framework, so framework health checks do not
    work with neo4j.
    """
    package = neo_package
    shakedown.install_package(package)
    shakedown.deployment_wait(
        timeout=timedelta(minutes=5).total_seconds(),
        app_id='neo4j/core')
    assert shakedown.package_installed(package), 'Package failed to install'

    marathon_client = marathon.create_client()
    for task in marathon_client.get_tasks('neo4j/core'):
        health = task['healthCheckResults'][0]
        assert health['lastSuccess'] is not None, 'Healthcheck was not successful'
        assert health['consecutiveFailures'] == 0, 'Healthcheck had consecutive failures'
def install(package_name, running_task_count, service_name=None,
            additional_options=None, package_version=None):
    """Install a package, wait for its expected tasks, then wait for the
    service's marathon deployment to clear.

    Args:
        package_name: Universe package to install.
        running_task_count: number of tasks expected once deployed.
        service_name: marathon app name; defaults to package_name.
        additional_options: extra options merged over package defaults
            (None means none; avoids a shared mutable default).
        package_version: specific package version, or None for latest.
    """
    if not service_name:
        service_name = package_name
    start = time.time()

    # `or {}` keeps old behavior without the mutable-default-argument pitfall.
    merged_options = get_package_options(additional_options or {})
    print('Installing {} with options={} version={}'.format(
        package_name, merged_options, package_version))

    # install_package_and_wait silently waits for all marathon deployments to
    # clear. To give some visibility, install in the following order:

    # 1. install package
    shakedown.install_package(package_name,
                              package_version=package_version,
                              options_json=merged_options)

    # 2. wait for expected tasks to come up
    print("Waiting for expected tasks to come up...")
    sdk_tasks.check_running(service_name, running_task_count)

    # 3. check service health
    marathon_client = dcos.marathon.create_client()

    def fn():
        # TODO(nickbp): upstream fix to shakedown, which currently checks for
        # ANY deployments rather than the one we care about
        deploying_apps = set()
        print("Getting deployments")
        deployments = marathon_client.get_deployments()
        print("Found {} deployments".format(len(deployments)))
        for d in deployments:
            print("Deployment: {}".format(d))
            for a in d.get('affectedApps', []):
                print("Adding {}".format(a))
                deploying_apps.add(a)
        print('Checking deployment of {} has ended:\n- Deploying apps: {}'.
              format(service_name, deploying_apps))
        return not '/{}'.format(service_name) in deploying_apps

    # pass fn directly: `lambda: fn()` added nothing
    sdk_spin.time_wait_noisy(fn, timeout_seconds=30)

    print('Install done after {}'.format(
        sdk_spin.pretty_time(time.time() - start)))
def install(package_name, running_task_count, service_name=None,
            additional_options=None, package_version=None,
            check_suppression=True, timeout_seconds=15 * 60):
    """Install a package via shakedown (which waits for tasks and the
    marathon deployment), then optionally wait for offer suppression.

    Args:
        package_name: Universe package to install.
        running_task_count: number of tasks expected once deployed.
        service_name: marathon app name; defaults to package_name.
        additional_options: extra options merged over package defaults
            (None means none; avoids a shared mutable default).
        package_version: specific package version, or None for latest.
        check_suppression: also wait for the framework to suppress offers.
        timeout_seconds: install timeout passed to shakedown.
    """
    if not service_name:
        service_name = package_name
    start = time.time()

    # `or {}` keeps old behavior without the mutable-default-argument pitfall.
    merged_options = get_package_options(additional_options or {})

    sdk_utils.out('Installing {} with options={} version={}'.format(
        package_name, merged_options, package_version))

    # 1. Install package, wait for tasks, wait for marathon deployment
    shakedown.install_package(
        package_name,
        package_version=package_version,
        service_name=service_name,
        options_json=merged_options,
        wait_for_completion=True,
        timeout_sec=timeout_seconds,
        expected_running_tasks=running_task_count)

    # 2. Ensure the framework is suppressed.
    #
    # This is only configurable in order to support installs from Universe
    # during the upgrade_downgrade tests, because currently the suppression
    # endpoint isn't supported by all frameworks in Universe. It can be
    # removed once all frameworks rely on dcos-commons >= 0.13.
    if check_suppression:
        sdk_utils.out("Waiting for framework to be suppressed...")
        shakedown.wait_for(
            lambda: sdk_api.is_suppressed(service_name),
            noisy=True,
            timeout_seconds=5 * 60)

    sdk_utils.out('Install done after {}'.format(
        shakedown.pretty_duration(time.time() - start)))
def test_kibana_installation_with_xpack():
    """Install elasticsearch with X-Pack enabled, then verify Kibana against it.

    Note that this test may take 20-30 minutes to run: Kibana needs to be
    able to connect to an elasticsearch w/x-pack cluster in order to return
    success on /login; otherwise it will generate infinite 302 redirects.
    """
    elastic_options = {"elasticsearch": {"xpack_enabled": True}}
    shakedown.install_package(ELASTIC_PACKAGE_NAME, options_json=elastic_options)
    tasks.check_running(ELASTIC_PACKAGE_NAME, DEFAULT_TASK_COUNT)
    wait_for_expected_nodes_to_exist()

    kibana_options = {"kibana": {"xpack_enabled": True}}
    # installing Kibana w/x-pack can take 15 minutes
    test_kibana(kibana_options, "/service/kibana/login")

    install.uninstall(ELASTIC_PACKAGE_NAME)
def test_kibana(options, path):
    """Install Kibana with `options`, verify adminrouter serves `path`,
    then uninstall."""
    shakedown.install_package(PACKAGE_NAME, options_json=options)
    shakedown.deployment_wait(
        timeout=KIBANA_WAIT_TIME_IN_SECONDS,
        app_id="/{}".format(PACKAGE_NAME))
    check_kibana_adminrouter_integration(path)
    install.uninstall(PACKAGE_NAME)
def setup_module(module):
    """Module setup: remove any previous install, then start an install with
    a custom task spec without blocking on completion."""
    install.uninstall(PACKAGE_NAME)

    spec_options = install.get_package_options(
        {"service": {"spec_file": "examples/taskcfg.yml"}})
    # don't wait for install to complete successfully:
    shakedown.install_package(PACKAGE_NAME, options_json=spec_options)