def test_upgrade_downgrade():
    """Install the Universe (master) build, upgrade to the test build, then roll back."""
    # Both Universe and the test repo must be present. The Framework Test
    # Suite only runs packages from Universe; unlike the PR jobs it never
    # adds a test repo, so bail out gracefully when one is missing.
    repos = shakedown.get_package_repos()['repositories']
    if len(repos) != 2:
        print('No test repo found. Skipping test_upgrade_downgrade')
        return

    repo_name, repo_url = get_test_repo_info()
    candidate_version = get_pkg_version()
    print('Found test version: {}'.format(candidate_version))

    # Drop the test repo so version resolution falls back to the Universe build.
    remove_repo(repo_name, candidate_version)
    universe_version = get_pkg_version()
    print('Found master version: {}'.format(universe_version))

    print('Installing master version')
    install.install(PACKAGE_NAME, DEFAULT_TASK_COUNT,
                    package_version=universe_version)

    print('Upgrading to test version')
    marathon.destroy_app(PACKAGE_NAME)
    add_repo(repo_name, repo_url, prev_version=universe_version)
    install.install(PACKAGE_NAME, DEFAULT_TASK_COUNT,
                    package_version=candidate_version)

    print('Downgrading to master version')
    marathon.destroy_app(PACKAGE_NAME)
    install.install(PACKAGE_NAME, DEFAULT_TASK_COUNT,
                    package_version=universe_version)
def test_upgrade_downgrade():
    """Upgrade cassandra from the Universe (master) build to the test build and back."""
    # Requires exactly two repos: Universe plus the test repo.
    repos = shakedown.get_package_repos()['repositories']
    if len(repos) != 2:
        print('No cassandra test repo found. Skipping test_upgrade_downgrade')
        return

    repo_name, repo_url = get_test_repo_info()
    candidate_version = get_pkg_version()
    print('Found test version: {}'.format(candidate_version))

    # Remove the test repo so the Universe (master) build resolves next.
    remove_repo(repo_name, candidate_version)
    universe_version = get_pkg_version()
    print('Found master version: {}'.format(universe_version))

    print('Installing master version')
    install(package_version=universe_version)
    check_health()

    # Deployment plan must have fully completed before we attempt the upgrade.
    plan = infinity_commons.get_and_verify_plan(
        lambda p: p['status'] == infinity_commons.PlanState.COMPLETE.value)
    assert plan['status'] == infinity_commons.PlanState.COMPLETE.value
    # TODO: write some data

    print('Upgrading to test version')
    destroy_service()
    add_repo(repo_name, repo_url, universe_version)
    install(package_version=candidate_version)
    check_post_version_change_health()

    print('Downgrading to master version')
    destroy_service()
    install(package_version=universe_version)
    check_post_version_change_health()
def test_upgrade_downgrade():
    """Upgrade kafka from the Universe (master) build to the test build and back."""
    # Requires exactly two repos: Universe plus the kafka test repo.
    repos = shakedown.get_package_repos()['repositories']
    if len(repos) != 2:
        print('No kafka test repo found. Skipping test_upgrade_downgrade')
        return

    repo_name, repo_url = get_test_repo_info()
    candidate_version = get_pkg_version()
    print('Found test version: {}'.format(candidate_version))

    # Remove the test repo so the Universe (master) build resolves next.
    remove_repo(repo_name, candidate_version)
    universe_version = get_pkg_version()
    print('Found master version: {}'.format(universe_version))

    print('Installing master version')
    install(package_version=universe_version)
    check_health()

    # The deploy plan must be COMPLETE, and the broker must be serving
    # topics/messages, before we exercise the upgrade path.
    plan = get_plan(lambda p: p['status'] == 'COMPLETE')
    assert plan['status'] == 'COMPLETE'
    topics_are_available()
    write_messages()

    print('Upgrading to test version')
    destroy_service()
    add_repo(repo_name, repo_url, universe_version)
    install(package_version=candidate_version)
    check_post_version_change_health()

    print('Downgrading to master version')
    destroy_service()
    install(package_version=universe_version)
    check_post_version_change_health()
def test_upgrade_downgrade():
    """Upgrade hdfs from the Universe (master) build to the test build and back."""
    # Requires exactly two repos: Universe plus the test repo.
    repos = shakedown.get_package_repos()['repositories']
    if len(repos) != 2:
        print('No test repo found. Skipping test_upgrade_downgrade')
        return

    candidate_version = get_pkg_version()
    print('Found test version: {}'.format(candidate_version))

    # Register the master repo so its build resolves ahead of the test build.
    add_repo(MASTER_CUSTOM_NAME, MASTER_CUSTOM_URL, prev_version=candidate_version)
    universe_version = get_pkg_version()
    print('Found master version: {}'.format(universe_version))

    print('Installing master version')
    install({'package_version': universe_version})
    check_health()

    # Seed a file, then pause so the write lands and replicates across nodes.
    write_some_data("data-0-node.hdfs.mesos", TEST_FILE_NAME)
    time.sleep(5)

    print('Upgrading to test version')
    destroy_and_install(candidate_version)
    check_health_after_version_change()

    print('Downgrading to master version')
    destroy_and_install(universe_version)
    check_health_after_version_change()

    # Clean up: drop the master repo we registered above.
    remove_repo(prev_version=universe_version)
def deploy_dispatchers(num_dispatchers, service_name_base, output_file, options,
                       options_file=None,
                       package_repo=None,
                       quota_dispatcher_cpus=1, quota_dispatcher_gpus=0,
                       quota_dispatcher_mem=2048.0,
                       quota_driver_cpus=1, quota_driver_gpus=0,
                       quota_driver_mem=1024.0):
    """Install `num_dispatchers` Spark dispatcher instances.

    For each dispatcher this creates a dispatcher-role and a driver-role quota,
    installs the package under a unique service name, and appends a CSV line
    `service_name,dispatcher_role,driver_role` to `output_file`.

    Args:
        num_dispatchers: number of dispatcher instances to install.
        service_name_base: prefix for service names ("<base>-<i>").
        output_file: path of the CSV file to (over)write.
        options: package options dict; its "service" section is mutated in
            place with each instance's name and role.
        options_file: if given, passed to install_package instead of `options`.
        package_repo: optional package repo URI to register (once) if absent.
        quota_dispatcher_*/quota_driver_*: per-role quota sizes.
    """
    with open(output_file, "w") as outfile:
        shakedown.run_dcos_command("package install spark --cli --yes")

        # BUGFIX: the repo registration is loop-invariant (its name is derived
        # from service_name_base, not the loop index), so register it once up
        # front instead of re-checking the repo list on every iteration.
        if package_repo is not None:
            known_uris = [
                x['uri']
                for x in shakedown.get_package_repos()['repositories']
            ]
            if package_repo not in known_uris:
                shakedown.add_package_repo(
                    repo_name="{}-repo".format(service_name_base),
                    repo_url=package_repo)

        for i in range(num_dispatchers):
            service_name = "{}-{}".format(service_name_base, i)

            # Each instance gets a unique service name.
            options["service"]["name"] = service_name

            # Create dispatcher & driver role quotas for this instance.
            dispatcher_role = "{}-dispatcher-role".format(service_name)
            create_quota(name=dispatcher_role,
                         cpus=quota_dispatcher_cpus,
                         gpus=quota_dispatcher_gpus,
                         mem=quota_dispatcher_mem)
            driver_role = "{}-driver-role".format(service_name)
            create_quota(name=driver_role,
                         cpus=quota_driver_cpus,
                         gpus=quota_driver_gpus,
                         mem=quota_driver_mem)

            # Install the dispatcher with its dedicated role.
            # NOTE(review): `arguments` is a module-level global (looks like a
            # docopt dict) — confirm it is populated before this is called.
            options["service"]["role"] = dispatcher_role
            if options_file is not None:
                shakedown.install_package(
                    package_name=arguments['--package-name'],
                    service_name=service_name,
                    options_file=options_file)
            else:
                shakedown.install_package(
                    package_name=arguments['--package-name'],
                    service_name=service_name,
                    options_json=options)

            outfile.write(
                "{},{},{}\n".format(service_name, dispatcher_role, driver_role))
def get_test_repo_info():
    """Return (name, uri) of the first configured package repository."""
    first_repo = shakedown.get_package_repos()['repositories'][0]
    return first_repo['name'], first_repo['uri']
def get_repo_list():
    """Return the list of configured package repositories."""
    repos = shakedown.get_package_repos()
    return repos['repositories']