def test_deploy():
    """Verify deployment first fails with the incomplete default config, then
    succeeds once the required TASKCFG_ALL_* env vars are supplied.

    taskcfg.yml will initially fail to deploy because several options are
    missing in the default marathon.json.mustache, so tasks should keep
    failing for at least 30s before we patch the config.
    """
    print('Checking that tasks are failing to launch for at least 30s')
    end_time = time.time() + 30
    # we can get brief blips of TASK_RUNNING but they shouldnt last more than 2-3s:
    consecutive_task_running = 0
    while time.time() < end_time:
        try:
            tasks = shakedown.get_service_tasks(PACKAGE_NAME)
        except Exception:
            # Service may not be queryable yet. Back off before retrying —
            # the original `continue` skipped the sleep and busy-looped
            # against the API. (The dropped `as e` binding was unused.)
            time.sleep(1)
            continue
        states = [t['state'] for t in tasks]
        print('Task states: {}'.format(states))
        if TASK_RUNNING_STATE in states:
            consecutive_task_running += 1
            # more than 3 consecutive RUNNING observations means the broken
            # config unexpectedly deployed:
            assert consecutive_task_running <= 3
        else:
            consecutive_task_running = 0
        time.sleep(1)

    # add the needed envvars in marathon and confirm that the deployment succeeds:
    config = get_marathon_config()
    env = config['env']
    del env['SLEEP_DURATION']
    env['TASKCFG_ALL_OUTPUT_FILENAME'] = 'output'
    env['TASKCFG_ALL_SLEEP_DURATION'] = '1000'
    request(dcos.http.put, marathon_api_url('apps/' + PACKAGE_NAME), json=config)
    check_health()
def destroy_service():
    """Delete the service's marathon app and wait for the scheduler to vanish."""
    destroy_endpoint = marathon_api_url_with_param('apps', PACKAGE_NAME)
    request(dcos.http.delete, destroy_endpoint)

    # Make sure the scheduler has been destroyed
    def fn():
        # BUG FIX: the original had no `return`, so spin() always received
        # None and the "destroyed" predicate trivially passed without waiting.
        return shakedown.get_service(PACKAGE_NAME)

    def success_predicate(service):
        # `is None` (identity), not `== None` — PEP 8.
        return (service is None, 'Service not destroyed')

    spin(fn, success_predicate)
def test_bump_hello_cpus():
    """Raising HELLO_CPUS must relaunch the 'hello' tasks and end healthy."""
    check_health()
    old_task_ids = get_task_ids('hello')
    print('hello ids: ' + str(old_task_ids))

    config = get_marathon_config()
    bumped_cpus = float(config['env']['HELLO_CPUS']) + 0.1
    config['env']['HELLO_CPUS'] = str(bumped_cpus)
    request(dcos.http.put, marathon_api_url('apps/' + PACKAGE_NAME), json=config)

    # a CPU change is a task-level update, so the old task ids must be replaced
    tasks_updated('hello', old_task_ids)
    check_health()
def test_bump_hello_nodes():
    """Adding a 'hello' node must not restart the existing 'hello' tasks."""
    check_health()
    existing_ids = get_task_ids('hello')
    print('hello ids: ' + str(existing_ids))

    config = get_marathon_config()
    grown_count = int(config['env']['HELLO_COUNT']) + 1
    config['env']['HELLO_COUNT'] = str(grown_count)
    request(dcos.http.put, marathon_api_url('apps/' + PACKAGE_NAME), json=config)

    check_health()
    # scaling out adds tasks; the ones already running stay untouched
    tasks_not_updated('hello', existing_ids)
def increment_broker_port_config():
    """Bump the Kafka BROKER_PORT env var by one and push the updated config.

    The marathon PUT response is intentionally discarded (the original bound
    it to an unused local `r`).
    """
    config = get_kafka_config()
    config['env']['BROKER_PORT'] = str(int(config['env']['BROKER_PORT']) + 1)
    request(
        dcos.http.put,
        marathon_api_url('apps/kafka'),
        json=config,
    )
def test_can_adjust_config_from_dynamic_to_dynamic_port():
    """Bumping BROKER_CPUS must redeploy and return to a healthy state.

    NOTE(review): this name duplicates the next test's and doesn't match the
    body (it bumps CPUs, not ports). Renaming would change the test's public
    identifier, so it is left as-is and only flagged here.
    """
    check_health()
    # fetch connection info up front (the original bound it to an unused
    # local; the call is kept in case it doubles as an availability check)
    get_connection_info()['address']

    config = get_kafka_config()
    # BUG FIX: was int(...), which raises ValueError on a fractional value
    # such as '1.1' (e.g. after a prior bump); sibling CPU tests use float.
    broker_cpus = float(config['env']['BROKER_CPUS'])
    config['env']['BROKER_CPUS'] = str(broker_cpus + 0.1)
    request(dcos.http.put, marathon_api_url('apps/kafka'), json=config)
    check_health()
def test_can_adjust_config_from_dynamic_to_dynamic_port():
    """Changing an unrelated env var must leave the dynamic broker ports as-is."""
    check_health()
    addresses_before = get_connection_info()['address']

    config = get_kafka_config()
    config['env']['KAFKA_VER_NAME'] = 'kafka-nonce-ver'
    request(dcos.http.put, marathon_api_url('apps/kafka'), json=config)
    check_health()

    addresses_after = get_connection_info()['address']
    # compare only the port component of each host:port address
    ports_before = {address.split(':')[-1] for address in addresses_before}
    ports_after = {address.split(':')[-1] for address in addresses_after}
    assert ports_after == ports_before
def test_can_adjust_config_from_static_to_dynamic_port():
    """Setting BROKER_PORT=0 must move every broker off the static 9092 port."""
    check_health()

    config = get_kafka_config()
    config['env']['BROKER_PORT'] = '0'
    request(dcos.http.put, marathon_api_url('apps/kafka'), json=config)
    check_health()

    addresses = get_connection_info()['address']
    assert len(addresses) == 3
    # every broker should now be on a dynamically assigned (non-9092) port
    for address in addresses:
        port = int(address.split(':')[-1])
        assert port != 9092
def test_bump_data_nodes():
    """Adding a data node grows the cluster without restarting existing data tasks."""
    check_health()
    previous_ids = get_task_ids('data')
    print('data ids: ' + str(previous_ids))

    config = get_marathon_config()
    config['env']['DATA_COUNT'] = str(int(config['env']['DATA_COUNT']) + 1)
    request(
        dcos.http.put,
        marathon_api_url('apps/' + PACKAGE_NAME),
        json=config)

    # one extra task now expected; pre-existing data tasks must be untouched
    check_health(DEFAULT_HDFS_TASK_COUNT + 1)
    tasks_not_updated('data', previous_ids)
def test_bump_journal_cpus():
    """Raising JOURNAL_CPUS must relaunch the journal tasks and end healthy."""
    check_health()
    old_ids = get_task_ids('journal')
    print('journal ids: ' + str(old_ids))

    config = get_marathon_config()
    print('marathon config: ')
    print(config)
    raised_cpus = float(config['env']['JOURNAL_CPUS']) + 0.1
    config['env']['JOURNAL_CPUS'] = str(raised_cpus)
    request(
        dcos.http.put,
        marathon_api_url('apps/' + PACKAGE_NAME),
        json=config)

    # a CPU change replaces the journal tasks
    tasks_updated('journal', old_ids)
    check_health()
def test_bump_world_nodes():
    """Adding a world node must not restart existing hello or world tasks."""
    check_health()
    hello_before = get_task_ids('hello')
    print('hello ids: ' + str(hello_before))
    world_before = get_task_ids('world')
    print('world ids: ' + str(world_before))

    config = get_marathon_config()
    config['env']['WORLD_COUNT'] = str(int(config['env']['WORLD_COUNT']) + 1)
    request(dcos.http.put, marathon_api_url('apps/' + PACKAGE_NAME), json=config)

    # one extra task expected overall; neither pod's existing tasks restart
    check_health(DEFAULT_TASK_COUNT + 1)
    tasks_not_updated('hello', hello_before)
    tasks_not_updated('world', world_before)
def test_bump_metadata_cpus():
    """Raising METADATA_CPU restarts meta-data tasks but leaves data tasks alone."""
    check_health()
    metadata_before = get_task_ids('meta-data')
    print('meta-data ids: ' + str(metadata_before))
    data_before = get_task_ids('data')
    print('data ids: ' + str(data_before))

    config = get_marathon_config()
    current_cpus = float(config['env']['METADATA_CPU'])
    config['env']['METADATA_CPU'] = str(current_cpus + 0.1)
    request(dcos.http.put, marathon_api_url('apps/' + PACKAGE_NAME), json=config)

    # only the pod whose resource changed should be relaunched
    tasks_updated('meta-data', metadata_before)
    tasks_not_updated('data', data_before)
    check_health()
def fn():
    """Attempt the DELETE against destroy_endpoint; report whether it succeeded."""
    try:
        request(dcos.http.delete, destroy_endpoint)
    except dcos.errors.DCOSHTTPException:
        # endpoint rejected the delete (e.g. still deploying) — retryable
        return False
    return True