def test_zones_referenced_in_placement_constraints():
    """Install with an @zone GROUP_BY placement constraint and verify each broker
    reports a valid fault-domain zone as its rack.
    """
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    # Fresh install under the foldered name with the zone constraint applied.
    sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
    sdk_install.install(
        config.PACKAGE_NAME,
        foldered_name,
        config.DEFAULT_BROKER_COUNT,
        additional_options={
            "service": {"name": foldered_name, "placement_constraint": '[["@zone", "GROUP_BY"]]'}
        },
    )
    # Wait for all brokers to register before inspecting them.
    test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=foldered_name)
    _, broker_ids, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, "broker list", parse_json=True)
    for broker_id in broker_ids:
        _, broker_info, _ = sdk_cmd.svc_cli(
            config.PACKAGE_NAME, foldered_name, "broker get {}".format(broker_id), parse_json=True
        )
        # With the @zone constraint, the broker's rack should be populated with a valid zone.
        assert sdk_fault_domain.is_valid_zone(broker_info.get("rack"))
    sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
def test_custom_zookeeper(): broker_ids = sdk_tasks.get_task_ids( FOLDERED_SERVICE_NAME, '{}-'.format(config.DEFAULT_POD_TYPE)) # create a topic against the default zk: sdk_cmd.svc_cli( config.PACKAGE_NAME, FOLDERED_SERVICE_NAME, 'topic create {}'.format(config.DEFAULT_TOPIC_NAME), json=True) assert sdk_cmd.svc_cli( config.PACKAGE_NAME, FOLDERED_SERVICE_NAME, 'topic list', json=True) == [config.DEFAULT_TOPIC_NAME] marathon_config = sdk_marathon.get_config(FOLDERED_SERVICE_NAME) # should be using default path when this envvar is empty/unset: assert marathon_config['env']['KAFKA_ZOOKEEPER_URI'] == '' # use a custom zk path that's WITHIN the 'dcos-service-' path, so that it's automatically cleaned up in uninstall: zk_path = 'master.mesos:2181/{}/CUSTOMPATH'.format(ZK_SERVICE_PATH) marathon_config['env']['KAFKA_ZOOKEEPER_URI'] = zk_path sdk_marathon.update_app(FOLDERED_SERVICE_NAME, marathon_config) sdk_tasks.check_tasks_updated( FOLDERED_SERVICE_NAME, '{}-'.format(config.DEFAULT_POD_TYPE), broker_ids) sdk_plan.wait_for_completed_deployment(FOLDERED_SERVICE_NAME) # wait for brokers to finish registering test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=FOLDERED_SERVICE_NAME) zookeeper = sdk_cmd.svc_cli( config.PACKAGE_NAME, FOLDERED_SERVICE_NAME, 'endpoints zookeeper') assert zookeeper.rstrip('\n') == zk_path # topic created earlier against default zk should no longer be present: assert sdk_cmd.svc_cli(config.PACKAGE_NAME, FOLDERED_SERVICE_NAME, 'topic list', json=True) == []
def kafka_server(zookeeper_server):
    """Fixture: install kafka wired to an externally-provided zookeeper service,
    yield for the test session, and always uninstall afterwards.
    """
    try:
        # Get the zookeeper DNS values
        zookeeper_dns = sdk_cmd.svc_cli(
            zookeeper_server["package_name"],
            zookeeper_server["service"]["name"],
            "endpoint clientport",
            parse_json=True,
        )[1]["dns"]

        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        # Join the ZK DNS entries into a single connection string.
        config.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options={
                "kafka": {
                    "kafka_zookeeper_uri": ",".join(zookeeper_dns)
                }
            },
        )

        # wait for brokers to finish registering before starting tests
        test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=config.SERVICE_NAME)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def test_custom_zookeeper():
    """Repoint the foldered service at a custom ZK path and verify brokers restart,
    re-register, and the topic created against the default ZK is gone.
    """
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    broker_ids = sdk_tasks.get_task_ids(foldered_name, '{}-'.format(config.DEFAULT_POD_TYPE))

    # create a topic against the default zk:
    test_utils.create_topic(config.DEFAULT_TOPIC_NAME, service_name=foldered_name)

    marathon_config = sdk_marathon.get_config(foldered_name)
    # should be using default path when this envvar is empty/unset:
    assert marathon_config['env']['KAFKA_ZOOKEEPER_URI'] == ''

    # use a custom zk path that's WITHIN the 'dcos-service-' path, so that it's
    # automatically cleaned up in uninstall:
    zk_path = 'master.mesos:2181/{}/CUSTOMPATH'.format(sdk_utils.get_zk_path(foldered_name))
    marathon_config['env']['KAFKA_ZOOKEEPER_URI'] = zk_path
    sdk_marathon.update_app(foldered_name, marathon_config)

    # the env change restarts all brokers; wait for new task ids
    sdk_tasks.check_tasks_updated(foldered_name, '{}-'.format(config.DEFAULT_POD_TYPE), broker_ids)
    sdk_plan.wait_for_completed_deployment(foldered_name)

    # wait for brokers to finish registering
    test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=foldered_name)

    zookeeper = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'endpoints zookeeper')
    assert zookeeper.rstrip('\n') == zk_path

    # topic created earlier against default zk should no longer be present:
    topic_list_info = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'topic list', json=True)
    test_utils.assert_topic_lists_are_equal_without_automatic_topics([], topic_list_info)
def test_zones_not_referenced_in_placement_constraints():
    """Without any zone placement constraint, brokers must not get a rack assigned."""
    name = sdk_utils.get_foldered_name(config.SERVICE_NAME)

    sdk_install.uninstall(config.PACKAGE_NAME, name)
    options = {"service": {"name": name}}
    sdk_install.install(
        config.PACKAGE_NAME,
        name,
        config.DEFAULT_BROKER_COUNT,
        additional_options=options)
    test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=name)

    # Every broker's metadata should report no rack at all.
    for bid in sdk_cmd.svc_cli(config.PACKAGE_NAME, name, 'broker list', json=True):
        info = sdk_cmd.svc_cli(config.PACKAGE_NAME, name, 'broker get {}'.format(bid), json=True)
        assert info.get('rack') is None

    sdk_install.uninstall(config.PACKAGE_NAME, name)
def test_zones_referenced_in_placement_constraints():
    """With an @zone GROUP_BY constraint, every broker's rack must be a valid zone."""
    name = sdk_utils.get_foldered_name(config.SERVICE_NAME)

    sdk_install.uninstall(config.PACKAGE_NAME, name)
    options = {
        "service": {
            "name": name,
            "placement_constraint": "[[\"@zone\", \"GROUP_BY\"]]"
        }
    }
    sdk_install.install(
        config.PACKAGE_NAME,
        name,
        config.DEFAULT_BROKER_COUNT,
        additional_options=options)
    test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=name)

    # Each broker should have picked up a fault-domain zone as its rack.
    for bid in sdk_cmd.svc_cli(config.PACKAGE_NAME, name, 'broker list', json=True):
        info = sdk_cmd.svc_cli(config.PACKAGE_NAME, name, 'broker get {}'.format(bid), json=True)
        assert sdk_fault_domain.is_valid_zone(info.get('rack'))

    sdk_install.uninstall(config.PACKAGE_NAME, name)
def test_zones_not_referenced_in_placement_constraints():
    """Install without any placement constraint and verify no broker has a rack
    (zone) assigned.

    Fix: compare against None with `is None` (PEP 8) instead of `== None`.
    """
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
    sdk_install.install(
        config.PACKAGE_NAME,
        foldered_name,
        config.DEFAULT_BROKER_COUNT,
        additional_options={
            "service": {
                "name": foldered_name
            }
        })
    # Wait for all brokers to register before inspecting them.
    test_utils.broker_count_check(
        config.DEFAULT_BROKER_COUNT, service_name=foldered_name)
    broker_ids = sdk_cmd.svc_cli(
        config.PACKAGE_NAME, foldered_name, 'broker list', json=True)
    for broker_id in broker_ids:
        broker_info = sdk_cmd.svc_cli(
            config.PACKAGE_NAME, foldered_name,
            'broker get {}'.format(broker_id), json=True)
        # Without a zone constraint, no rack should be assigned to any broker.
        assert broker_info.get('rack') is None
    sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
def test_zones_referenced_in_placement_constraints():
    """Install with an @zone GROUP_BY constraint; each broker's rack must be a valid zone."""
    svc = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    sdk_install.uninstall(config.PACKAGE_NAME, svc)

    sdk_install.install(
        config.PACKAGE_NAME,
        svc,
        config.DEFAULT_BROKER_COUNT,
        additional_options={
            "service": {
                "name": svc,
                "placement_constraint": "[[\"@zone\", \"GROUP_BY\"]]"
            }
        })

    # Brokers must all be registered before we inspect their metadata.
    test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=svc)

    ids = sdk_cmd.svc_cli(config.PACKAGE_NAME, svc, 'broker list', json=True)
    for current in ids:
        details = sdk_cmd.svc_cli(
            config.PACKAGE_NAME, svc, 'broker get {}'.format(current), json=True)
        assert sdk_fault_domain.is_valid_zone(details.get('rack'))

    sdk_install.uninstall(config.PACKAGE_NAME, svc)
def test_zones_referenced_in_placement_constraints():
    """Install with an @zone GROUP_BY placement constraint and verify each broker
    reports a valid fault-domain zone as its rack (rc/stdout CLI variant).
    """
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
    sdk_install.install(
        config.PACKAGE_NAME,
        foldered_name,
        config.DEFAULT_BROKER_COUNT,
        additional_options={
            "service": {
                "name": foldered_name,
                "placement_constraint": '[["@zone", "GROUP_BY"]]'
            }
        },
    )
    # Wait for all brokers to register before inspecting them.
    test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=foldered_name)
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, "broker list")
    assert rc == 0, "Broker list command failed"
    for broker_id in json.loads(stdout):
        rc, stdout, _ = sdk_cmd.svc_cli(
            config.PACKAGE_NAME, foldered_name, "broker get {}".format(broker_id))
        assert rc == 0, "Broker get command failed"
        # rack is populated from the fault-domain zone when @zone is constrained
        assert sdk_fault_domain.is_valid_zone(json.loads(stdout).get("rack"))
    sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
def kafka_server(zookeeper_server):
    """Fixture: install kafka pointed at an external zookeeper service's client
    endpoint, yield for the test session, and always uninstall afterwards.
    """
    try:
        # Get the zookeeper DNS values
        zookeeper_dns = sdk_cmd.svc_cli(
            zookeeper_server["package_name"],
            zookeeper_server["service"]["name"],
            "endpoint clientport",
            json=True)["dns"]

        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        # Join the ZK DNS entries into a single connection string.
        config.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options={
                "kafka": {
                    "kafka_zookeeper_uri": ",".join(zookeeper_dns)
                }
            })

        # wait for brokers to finish registering before starting tests
        test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=config.SERVICE_NAME)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def test_pods_replace():
    """Replace broker pod 0 and wait for the full broker set to come back."""
    pod_task = '{}-0-{}'.format(DEFAULT_POD_TYPE, DEFAULT_TASK_NAME)
    old_ids = tasks.get_task_ids(SERVICE_NAME, pod_task)

    service_cli('pods replace {}-0'.format(DEFAULT_POD_TYPE))

    # the replaced pod must come up with fresh task ids
    tasks.check_tasks_updated(SERVICE_NAME, pod_task, old_ids)
    tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)
    # wait till all brokers register
    broker_count_check(DEFAULT_BROKER_COUNT)
def configure_package(configure_zookeeper):
    """Fixture: install kafka against an external ZK framework's autoip endpoint,
    yield for the test session, and always uninstall afterwards.
    """
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        zookeeper_framework_host = "{}.autoip.dcos.thisdcos.directory:1140".format(
            ZK_SERVICE_NAME)
        # NOTE(review): the connection string repeats zookeeper-0-server three
        # times; presumably zookeeper-0/-1/-2 were intended — confirm.
        config.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT,
            additional_options={
                "kafka": {
                    "kafka_zookeeper_uri":
                    "zookeeper-0-server.{host},zookeeper-0-server.{host},zookeeper-0-server.{host}"
                    .format(host=zookeeper_framework_host)
                }
            })

        # wait for brokers to finish registering before starting tests
        test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=config.SERVICE_NAME)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def configure_package(configure_security):
    """Fixture: run the SDK upgrade test (installing the foldered service with
    0.5-CPU brokers), yield for the test session, and always uninstall afterwards.
    """
    try:
        foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
        sdk_upgrade.test_upgrade(
            config.PACKAGE_NAME,
            foldered_name,
            config.DEFAULT_BROKER_COUNT,
            additional_options={
                "service": {
                    "name": foldered_name
                },
                "brokers": {
                    "cpus": 0.5
                }
            },
        )

        # wait for brokers to finish registering before starting tests
        test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=foldered_name)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
def configure_package(configure_security):
    """Fixture: install (DC/OS < 1.9) or upgrade-test the foldered service, yield
    for the test session, and always uninstall afterwards.
    """
    try:
        foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
        if shakedown.dcos_version_less_than("1.9"):
            # Last beta-kafka release (1.1.25-0.10.1.0-beta) excludes 1.8.
            # Skip upgrade tests with 1.8 and just install
            sdk_install.install(
                config.PACKAGE_NAME,
                foldered_name,
                config.DEFAULT_BROKER_COUNT,
                additional_options={"service": {
                    "name": foldered_name
                }})
        else:
            sdk_upgrade.test_upgrade(
                config.PACKAGE_NAME,
                foldered_name,
                config.DEFAULT_BROKER_COUNT,
                additional_options={"service": {
                    "name": foldered_name
                }})

        # wait for brokers to finish registering before starting tests
        test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=foldered_name)

        yield  # let the test session execute
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
def test_custom_zookeeper():
    """Repoint the foldered service at a custom ZK path and verify brokers restart,
    re-register, and the topic created against the default ZK is gone.
    """
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    broker_ids = sdk_tasks.get_task_ids(foldered_name, '{}-'.format(config.DEFAULT_POD_TYPE))

    # create a topic against the default zk:
    test_utils.create_topic(config.DEFAULT_TOPIC_NAME, service_name=foldered_name)

    marathon_config = sdk_marathon.get_config(foldered_name)
    # should be using default path when this envvar is empty/unset:
    assert marathon_config['env']['KAFKA_ZOOKEEPER_URI'] == ''

    # use a custom zk path that's WITHIN the 'dcos-service-' path, so that it's
    # automatically cleaned up in uninstall:
    zk_path = 'master.mesos:2181/{}/CUSTOMPATH'.format(sdk_utils.get_zk_path(foldered_name))
    marathon_config['env']['KAFKA_ZOOKEEPER_URI'] = zk_path
    sdk_marathon.update_app(foldered_name, marathon_config)

    # the env change restarts all brokers; wait for new task ids
    sdk_tasks.check_tasks_updated(foldered_name, '{}-'.format(config.DEFAULT_POD_TYPE), broker_ids)
    sdk_plan.wait_for_completed_deployment(foldered_name)

    # wait for brokers to finish registering
    test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=foldered_name)

    zookeeper = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'endpoints zookeeper')
    assert zookeeper.rstrip('\n') == zk_path

    # topic created earlier against default zk should no longer be present:
    topic_list_info = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, 'topic list', json=True)
    test_utils.assert_topic_lists_are_equal_without_automatic_topics([], topic_list_info)
def configure_package(configure_security):
    """Fixture: reinstall kafka under the foldered name, yield for the test
    session, and always tear it down afterwards.
    """
    try:
        name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
        sdk_install.uninstall(config.PACKAGE_NAME, name)
        install_kafka()

        # brokers must finish registering before any test may run
        test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=name)

        yield  # hand control to the test session
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, name)
def test_canary_second():
    """Continue the canary plan past its gate and verify it runs to completion."""
    service_cli('plan continue {} {}'.format(DEFAULT_PLAN_NAME, DEFAULT_PHASE_NAME))

    tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)
    broker_count_check(DEFAULT_BROKER_COUNT)

    plan = service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
    assert plan['status'] == 'COMPLETE'
    phase = plan['phases'][0]
    assert phase['status'] == 'COMPLETE'
    # every per-broker step should have finished
    for idx in range(DEFAULT_BROKER_COUNT):
        assert phase['steps'][idx]['status'] == 'COMPLETE'
def test_mesos_v0_api():
    """Install the service using the Mesos v0 API, then restore the default
    installation for the rest of the suite.

    Fix: compute foldered_name BEFORE the try block; previously a failure inside
    get_foldered_name() would have triggered a NameError in the finally clause.
    """
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    try:
        # Install Hello World using the v0 api.
        # Then, clean up afterwards.
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
        install_kafka(use_v0=True)
        sdk_tasks.check_running(foldered_name, config.DEFAULT_BROKER_COUNT)
    finally:
        # Restore the default installation for subsequent tests.
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
        install_kafka()

        # wait for brokers to finish registering before starting tests
        test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=foldered_name)
def kafka_server(configure_security):
    """Fixture: fresh kafka install; yields package/service identifiers and
    guarantees uninstall on teardown.
    """
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        config.install(
            config.PACKAGE_NAME,
            config.SERVICE_NAME,
            config.DEFAULT_BROKER_COUNT)

        # brokers must finish registering before any test may run
        test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=config.SERVICE_NAME)
        # tests interact with the brokers directly, so their DNS must resolve first
        test_utils.wait_for_broker_dns(config.PACKAGE_NAME, config.SERVICE_NAME)

        yield {"package_name": config.PACKAGE_NAME, "service": {"name": config.SERVICE_NAME}}
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def configure_package(configure_security):
    """Fixture: run the SDK upgrade test with 0.5-CPU brokers under a foldered
    service name; yields for the session and tears down afterwards.
    """
    try:
        name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
        sdk_install.uninstall(config.PACKAGE_NAME, name)

        extra_opts = {
            "service": {"name": name},
            "brokers": {"cpus": 0.5},
        }
        sdk_upgrade.test_upgrade(
            config.PACKAGE_NAME,
            name,
            config.DEFAULT_BROKER_COUNT,
            additional_options=extra_opts)

        # brokers must finish registering before any test may run
        test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=name)

        yield  # hand control to the test session
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, name)
def kafka_server(configure_security):
    """Fixture: install a clean kafka service, wait until brokers register and
    their DNS resolves, yield identifiers, and uninstall on teardown.
    """
    try:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
        config.install(config.PACKAGE_NAME, config.SERVICE_NAME, config.DEFAULT_BROKER_COUNT)

        # wait for broker registration, then for broker DNS, before yielding
        test_utils.broker_count_check(
            config.DEFAULT_BROKER_COUNT, service_name=config.SERVICE_NAME)
        test_utils.wait_for_broker_dns(config.PACKAGE_NAME, config.SERVICE_NAME)

        service_info = {
            "package_name": config.PACKAGE_NAME,
            "service": {"name": config.SERVICE_NAME},
        }
        yield service_info
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
def test_canary_first():
    """Continue the canary for the first broker and verify the plan pauses after it."""
    service_cli('plan continue {} {}'.format(DEFAULT_PLAN_NAME, DEFAULT_PHASE_NAME))

    tasks.check_running(SERVICE_NAME, 1)
    broker_count_check(1)

    # do not use service_plan always
    # when here, plan should always return properly
    plan = service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
    assert plan['status'] == 'WAITING'
    phase = plan['phases'][0]
    assert phase['status'] == 'WAITING'
    # first step done, second gated, any further steps still pending
    assert phase['steps'][0]['status'] == 'COMPLETE'
    assert phase['steps'][1]['status'] == 'WAITING'
    if DEFAULT_BROKER_COUNT > 2:
        assert phase['steps'][2]['status'] == 'PENDING'
def test_custom_zookeeper():
    """Repoint the foldered service at a custom ZK path and verify brokers
    restart, the zookeeper endpoint reflects the new path, and topics created
    against the default ZK disappear.
    """
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    broker_ids = sdk_tasks.get_task_ids(foldered_name, "{}-".format(config.DEFAULT_POD_TYPE))

    # create a topic against the default zk:
    test_utils.create_topic(config.DEFAULT_TOPIC_NAME, service_name=foldered_name)

    marathon_config = sdk_marathon.get_config(foldered_name)
    # should be using default path when this envvar is empty/unset:
    assert marathon_config["env"]["KAFKA_ZOOKEEPER_URI"] == ""

    # use a custom zk path that's WITHIN the 'dcos-service-' path, so that it's
    # automatically cleaned up in uninstall:
    zk_path = "master.mesos:2181/{}/CUSTOMPATH".format(
        sdk_utils.get_zk_path(foldered_name))
    marathon_config["env"]["KAFKA_ZOOKEEPER_URI"] = zk_path
    # NOTE(review): called with the config only, unlike the two-arg
    # update_app(service_name, config) form used elsewhere — confirm this
    # matches the sdk_marathon.update_app signature in use here.
    sdk_marathon.update_app(marathon_config)

    # the env change restarts all brokers; wait for new task ids
    sdk_tasks.check_tasks_updated(foldered_name, "{}-".format(config.DEFAULT_POD_TYPE), broker_ids)
    sdk_plan.wait_for_completed_deployment(foldered_name)

    # wait for brokers to finish registering
    test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=foldered_name)

    zookeeper = sdk_networks.get_endpoint_string(config.PACKAGE_NAME, foldered_name, "zookeeper")
    assert zookeeper == zk_path

    # topic created earlier against default zk should no longer be present:
    rc, stdout, _ = sdk_cmd.svc_cli(config.PACKAGE_NAME, foldered_name, "topic list")
    assert rc == 0, "Topic list command failed"
    test_utils.assert_topic_lists_are_equal_without_automatic_topics(
        [], json.loads(stdout))
def test_increase_count():
    """Bump BROKER_COUNT and verify the extra broker deploys only after
    `plan continue` releases the canary gate.

    Fixes: the expected-to-time-out check_running call was passed PACKAGE_NAME
    where every other tasks.* call in this test uses SERVICE_NAME; the bare
    `except:` is narrowed to `except Exception:` so KeyboardInterrupt/SystemExit
    are not swallowed.
    """
    marathon.bump_task_count_config(SERVICE_NAME, 'BROKER_COUNT')

    try:
        # The canary plan must NOT deploy the new broker yet; this wait is
        # expected to time out and raise.
        tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT + 1, timeout_seconds=60)
        assert False, "Should not start task now"
    except AssertionError as arg:
        raise arg
    except Exception:
        pass  # expected to fail

    tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)

    # The new broker's step should be gated WAITING; all prior steps COMPLETE.
    pl = service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
    assert pl['status'] == 'WAITING'
    assert pl['phases'][0]['status'] == 'WAITING'
    for step in range(DEFAULT_BROKER_COUNT):
        assert pl['phases'][0]['steps'][step]['status'] == 'COMPLETE'
    assert pl['phases'][0]['steps'][DEFAULT_BROKER_COUNT]['status'] == 'WAITING'

    # Release the gate and confirm the extra broker comes up.
    service_cli('plan continue {} {}'.format(DEFAULT_PLAN_NAME, DEFAULT_PHASE_NAME))
    tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT + 1)
    broker_count_check(DEFAULT_BROKER_COUNT + 1)

    pl = service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
    assert pl['status'] == 'COMPLETE'
    assert pl['phases'][0]['status'] == 'COMPLETE'
    for step in range(DEFAULT_BROKER_COUNT + 1):
        assert pl['phases'][0]['steps'][step]['status'] == 'COMPLETE'
def test_increase_count():
    """Manually bump BROKER_COUNT in the Marathon app config and verify the
    extra broker deploys only after `plan continue`.

    Fixes: local `config` renamed to `app_config` so it no longer shadows the
    `config` module name used throughout this file; the expected-to-time-out
    check_running call now uses SERVICE_NAME (was PACKAGE_NAME) consistently
    with every other tasks.* call; the bare `except:` is narrowed to
    `except Exception:`.
    """
    app_config = marathon.get_config(SERVICE_NAME)
    app_config['env']['BROKER_COUNT'] = str(int(app_config['env']['BROKER_COUNT']) + 1)
    marathon.update_app(SERVICE_NAME, app_config)

    try:
        # The canary plan must NOT deploy the new broker yet; this wait is
        # expected to time out and raise.
        tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT + 1, timeout_seconds=60)
        assert False, "Should not start task now"
    except AssertionError as arg:
        raise arg
    except Exception:
        pass  # expected to fail

    tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT)

    # The new broker's step should be gated WAITING; all prior steps COMPLETE.
    pl = service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
    assert pl['status'] == 'WAITING'
    assert pl['phases'][0]['status'] == 'WAITING'
    for step in range(DEFAULT_BROKER_COUNT):
        assert pl['phases'][0]['steps'][step]['status'] == 'COMPLETE'
    assert pl['phases'][0]['steps'][DEFAULT_BROKER_COUNT]['status'] == 'WAITING'

    # Release the gate and confirm the extra broker comes up.
    service_cli('plan continue {} {}'.format(DEFAULT_PLAN_NAME, DEFAULT_PHASE_NAME))
    tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT + 1)
    broker_count_check(DEFAULT_BROKER_COUNT + 1)

    pl = service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
    assert pl['status'] == 'COMPLETE'
    assert pl['phases'][0]['status'] == 'COMPLETE'
    for step in range(DEFAULT_BROKER_COUNT + 1):
        assert pl['phases'][0]['steps'][step]['status'] == 'COMPLETE'
def configure_package(configure_security):
    """Fixture: install (on DC/OS < 1.9) or upgrade-test the foldered service,
    yield for the test session, and always uninstall afterwards.
    """
    try:
        install.uninstall(config.PACKAGE_NAME, FOLDERED_SERVICE_NAME)

        service_opts = {"service": {"name": FOLDERED_SERVICE_NAME}}
        if shakedown.dcos_version_less_than("1.9"):
            # Last beta-kafka release (1.1.25-0.10.1.0-beta) excludes 1.8.
            # Skip upgrade tests with 1.8 and just install
            install.install(
                config.PACKAGE_NAME,
                FOLDERED_SERVICE_NAME,
                config.DEFAULT_BROKER_COUNT,
                additional_options=service_opts)
        else:
            sdk_upgrade.test_upgrade(
                config.PACKAGE_NAME,
                FOLDERED_SERVICE_NAME,
                config.DEFAULT_BROKER_COUNT,
                additional_options=service_opts)

        # brokers must finish registering before any test may run
        test_utils.broker_count_check(
            config.DEFAULT_BROKER_COUNT, service_name=FOLDERED_SERVICE_NAME)

        yield  # hand control to the test session
    finally:
        install.uninstall(config.PACKAGE_NAME, FOLDERED_SERVICE_NAME)
def test_upgrade_111_210(configure_security):
    """Upgrade kafka from a 1.1.1-based package to a 2.1.0-based package while
    pinning inter.broker.protocol.version to 1.0, then raise the protocol to 2.1
    via a config update.
    """
    foldered_name = sdk_utils.get_foldered_name(config.SERVICE_NAME)
    sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)

    from_version = '2.4.0-1.1.1'
    to_version = 'stub-universe'
    # In case that 2.1.0 has been released upgrade we want to test to a specific
    # 2.1.0 version rather than to latest stub universe version.
    ret_code, stdout, _ = sdk_cmd.run_cli(
        cmd='package describe kafka --package-versions',
    )
    assert ret_code == 0

    # Filter old versions in custom format that doesn't matches the x.x.x-x.x.x
    package_versions = [
        v for v in json.loads(stdout)
        if re.match(r'^([\d\.]+)\-([\d\.]+)$', v)
    ]
    # Newest-first list of package versions whose kafka half is exactly 2.1.0.
    version_210 = sorted(
        [v for v in package_versions if v.split('-')[1] == '2.1.0'],
        reverse=True,
    )
    if len(version_210) > 0:
        to_version = version_210[0]

    try:
        # Install the old (1.1.1-based) package version first.
        sdk_install.install(
            package_name=config.PACKAGE_NAME,
            service_name=foldered_name,
            expected_running_tasks=config.DEFAULT_BROKER_COUNT,
            additional_options={
                "service": {
                    "name": foldered_name
                },
                "brokers": {
                    "cpus": 0.5
                }
            },
            package_version=from_version,
        )

        # wait for brokers to finish registering before starting tests
        test_utils.broker_count_check(config.DEFAULT_BROKER_COUNT, service_name=foldered_name)

        # Assert that inter broker protocol is set to 1.x version
        _, options, _ = sdk_cmd.svc_cli(
            config.PACKAGE_NAME,
            foldered_name,
            'describe',
            parse_json=True,
        )
        assert options['kafka']['inter_broker_protocol_version'] == '1.0'

        task_ids = sdk_tasks.get_task_ids(foldered_name, "")

        # Run update to the new version, keeping the broker protocol pinned at
        # 1.0 so old and new brokers can interoperate during the rollout.
        with tempfile.NamedTemporaryFile() as opts_f:
            opts_f.write(
                json.dumps({
                    'kafka': {
                        'inter_broker_protocol_version': '1.0'
                    }
                }).encode('utf-8'))
            opts_f.flush()
            sdk_cmd.svc_cli(
                config.PACKAGE_NAME,
                foldered_name,
                'update start --package-version={} --options={}'.format(
                    to_version,
                    opts_f.name,
                ),
            )
            # we must manually upgrade the package CLI because it's not done
            # automatically in this flow (and why should it? that'd imply the
            # package CLI replacing itself via a call to the main CLI...)
            sdk_cmd.run_cli(
                'package install --yes --cli --package-version={} {}'.format(
                    to_version,
                    config.PACKAGE_NAME,
                ))

        sdk_tasks.check_tasks_updated(foldered_name, "", task_ids)

        # Assert that inter broker protocol is set to 1.x version
        _, options, _ = sdk_cmd.svc_cli(
            config.PACKAGE_NAME,
            foldered_name,
            'describe',
            parse_json=True,
        )
        assert options['kafka']['inter_broker_protocol_version'] == '1.0'

        # Update protocol to 2.1 (serially) by changing kafka configuration
        with tempfile.NamedTemporaryFile() as opts_f:
            options['kafka']['inter_broker_protocol_version'] = '2.1'
            opts_f.write(json.dumps(options).encode('utf-8'))
            opts_f.flush()
            sdk_cmd.svc_cli(
                config.PACKAGE_NAME,
                foldered_name,
                "update start --options={}".format(opts_f.name),
            )

        # Assert that inter broker protocol is set to 2.1 version
        _, options, _ = sdk_cmd.svc_cli(
            config.PACKAGE_NAME,
            foldered_name,
            'describe',
            parse_json=True,
        )
        assert options['kafka']['inter_broker_protocol_version'] == '2.1'
    finally:
        sdk_install.uninstall(config.PACKAGE_NAME, foldered_name)
def test_increase_cpu():
    """Bump BROKER_CPUS and drive the canary plan through broker 0 and broker 1,
    then wait for the remaining steps to complete.
    """

    def plan_waiting():
        # Poll helper: True once the plan reports WAITING.
        # NOTE(review): bare except deliberately swallows transient CLI/parse
        # errors while polling — confirm this is intended.
        try:
            pl = service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
            if pl['status'] == 'WAITING':
                return True
        except:
            pass
        return False

    def plan_complete():
        # Poll helper: True once the plan reports COMPLETE.
        try:
            pl = service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
            if pl['status'] == 'COMPLETE':
                return True
        except:
            pass
        return False

    # NOTE(review): this local shadows any module-level name `config`.
    config = marathon.get_config(SERVICE_NAME)
    config['env']['BROKER_CPUS'] = str(0.1 + float(config['env']['BROKER_CPUS']))
    marathon.update_app(SERVICE_NAME, config)

    # The CPU change puts the canary plan into WAITING at the first step.
    spin.time_wait_return(plan_waiting)
    pl = service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
    assert pl['status'] == 'WAITING'
    assert pl['phases'][0]['status'] == 'WAITING'
    assert pl['phases'][0]['steps'][0]['status'] == 'WAITING'
    assert pl['phases'][0]['steps'][1]['status'] == 'WAITING'
    for step in range(2, DEFAULT_BROKER_COUNT + 1):
        assert pl['phases'][0]['steps'][step]['status'] == 'PENDING'

    # all tasks are still running
    tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT + 1)

    # Continue the plan: broker 0 restarts with the new CPU setting.
    broker_ids = tasks.get_task_ids(SERVICE_NAME, '{}-0-{}'.format(DEFAULT_POD_TYPE, DEFAULT_TASK_NAME))
    service_cli('plan continue {} {}'.format(DEFAULT_PLAN_NAME, DEFAULT_PHASE_NAME))
    tasks.check_tasks_updated(SERVICE_NAME, '{}-0-{}'.format(DEFAULT_POD_TYPE, DEFAULT_TASK_NAME), broker_ids)
    tasks.check_running(SERVICE_NAME, DEFAULT_BROKER_COUNT + 1)

    pl = service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
    assert pl['status'] == 'WAITING'
    assert pl['phases'][0]['status'] == 'WAITING'
    assert pl['phases'][0]['steps'][0]['status'] == 'COMPLETE'
    assert pl['phases'][0]['steps'][1]['status'] == 'WAITING'
    for step in range(2, DEFAULT_BROKER_COUNT + 1):
        assert pl['phases'][0]['steps'][step]['status'] == 'PENDING'

    # Continue again: broker 1 restarts; the rest of the plan then runs out.
    broker_ids = tasks.get_task_ids(SERVICE_NAME, '{}-1-{}'.format(DEFAULT_POD_TYPE, DEFAULT_TASK_NAME))
    service_cli('plan continue {} {}'.format(DEFAULT_PLAN_NAME, DEFAULT_PHASE_NAME))
    tasks.check_tasks_updated(SERVICE_NAME, '{}-1-{}'.format(DEFAULT_POD_TYPE, DEFAULT_TASK_NAME), broker_ids)

    spin.time_wait_return(plan_complete)
    pl = service_cli('plan show {}'.format(DEFAULT_PLAN_NAME))
    assert pl['status'] == 'COMPLETE'
    assert pl['phases'][0]['status'] == 'COMPLETE'
    for step in range(DEFAULT_BROKER_COUNT + 1):
        assert pl['phases'][0]['steps'][step]['status'] == 'COMPLETE'
    broker_count_check(DEFAULT_BROKER_COUNT + 1)