def teardown(force, ignore_deployments):
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    client = utils.get_rest_client(management_ip)
    try:
        if not ignore_deployments and len(client.deployments.list()) > 0:
            msg = \
                ("Manager server {0} has existing deployments. Delete all "
                 "deployments first or add the '--ignore-deployments' flag "
                 "to your command to ignore these deployments and execute "
                 "teardown.".format(management_ip))
            raise exceptions.CloudifyCliError(msg)
    except IOError:
        msg = \
            "Failed querying manager server {0} about existing " \
            "deployments; The Manager server may be down. If you wish to " \
            'skip this check, you may use the "--ignore-deployments" flag, ' \
            'in which case teardown will occur regardless of the Manager ' \
            "server's status.".format(management_ip)
        raise exceptions.CloudifyCliError(msg)

    logger.info("tearing down {0}".format(management_ip))

    # Runtime properties might have changed since the last time we
    # executed 'use' (because of recovery), so we need to retrieve
    # the provider context again.
    try:
        logger.info('Retrieving provider context')
        management_ip = utils.get_management_server_ip()
        use(management_ip, utils.get_rest_port())
    except BaseException as e:
        logger.warning('Failed retrieving provider context: {0}. This '
                       'may cause a leaking management server '
                       'in case it has gone through a '
                       'recovery process'.format(str(e)))

    # reload settings since the provider context may have changed
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.teardown()

    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()

    logger.info("teardown complete")
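
# Usage sketch (assumption: the function above backs the `cfy teardown`
# command; the CLI argument wiring is not part of this snippet). The two
# guards map to flags roughly as follows:
#
#   cfy teardown -f                        # confirm the destructive action
#   cfy teardown -f --ignore-deployments   # also skip the deployments check
#
# Called programmatically without force, it fails fast before any REST call:
#
#   try:
#       teardown(force=False, ignore_deployments=False)
#   except exceptions.CloudifyCliError as e:
#       print(e)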
def _do_teardown():
    # reload settings since the provider context may have changed
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.teardown()
    # cleaning relevant data from working directory settings
    with utils.update_wd_settings() as wd_settings:
        # wd_settings.set_provider_context(provider_context)
        wd_settings.remove_management_server_context()
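
# Hypothetical caller sketch (the name _teardown_manager and the logging
# around the call are assumptions, not from the source): the helper above
# exists so every teardown path can share the dump-restore and settings
# cleanup in one place.
def _teardown_manager(logger, management_ip):
    logger.info('tearing down {0}'.format(management_ip))
    _do_teardown()
    logger.info('teardown complete')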
def teardown(force, ignore_deployments, config_file_path, ignore_validation):
    logger = get_logger()
    management_ip = utils.get_management_server_ip()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    client = utils.get_rest_client(management_ip)
    if not ignore_deployments and len(client.deployments.list()) > 0:
        msg = ("Management server {0} has active deployments. Add the "
               "'--ignore-deployments' flag to your command to ignore "
               "these deployments and execute topology teardown."
               .format(management_ip))
        raise exceptions.CloudifyCliError(msg)

    settings = utils.load_cloudify_working_dir_settings()
    if settings.get_is_provider_config():
        provider_common.provider_teardown(config_file_path, ignore_validation)
    else:
        logger.info("tearing down {0}".format(management_ip))
        provider_context = settings.get_provider_context()
        bs.read_manager_deployment_dump_if_needed(
            provider_context.get('cloudify', {}).get('manager_deployment'))
        bs.teardown(name='manager',
                    task_retries=0,
                    task_retry_interval=0,
                    task_thread_pool_size=1)
        # cleaning relevant data from working directory settings
        with utils.update_wd_settings() as wd_settings:
            # wd_settings.set_provider_context(provider_context)
            wd_settings.remove_management_server_context()

    logger.info("teardown complete")
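
# Minimal pre-flight sketch (assumption: utils.get_rest_client() returns a
# cloudify-rest-client instance whose deployments.list() yields a sequence).
# Pulling the check out of teardown() above would make the guard testable
# on its own; the helper name is hypothetical.
def _has_active_deployments(management_ip):
    client = utils.get_rest_client(management_ip)
    return len(client.deployments.list()) > 0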
def bootstrap(keep_up, validate_only, skip_validations, blueprint_path,
              inputs, install_plugins, task_retries, task_retry_interval,
              task_thread_pool_size):
    logger = get_logger()
    env_name = 'manager'

    # Verify directory is initialized
    utils.get_context_path()

    # verifying no environment exists from a previous bootstrap
    try:
        bs.load_env(env_name)
    except IOError:
        # Environment is clean
        pass
    else:
        raise RuntimeError(
            "Can't bootstrap because the environment is not clean. Clean the "
            'environment by calling teardown or reset it using the "cfy init '
            '-r" command')

    if not skip_validations:
        logger.info('Executing bootstrap validation...')
        bs.bootstrap_validation(
            blueprint_path,
            name=env_name,
            inputs=inputs,
            task_retries=task_retries,
            task_retry_interval=task_retry_interval,
            task_thread_pool_size=task_thread_pool_size,
            install_plugins=install_plugins,
            resolver=utils.get_import_resolver())
        logger.info('Bootstrap validation completed successfully')

    if not validate_only:
        try:
            logger.info('Executing manager bootstrap...')
            details = bs.bootstrap(
                blueprint_path,
                name=env_name,
                inputs=inputs,
                task_retries=task_retries,
                task_retry_interval=task_retry_interval,
                task_thread_pool_size=task_thread_pool_size,
                install_plugins=install_plugins)

            manager_ip = details['manager_ip']
            provider_context = details['provider_context']
            with utils.update_wd_settings() as ws_settings:
                ws_settings.set_management_server(manager_ip)
                ws_settings.set_management_key(details['manager_key_path'])
                ws_settings.set_management_user(details['manager_user'])
                ws_settings.set_provider_context(provider_context)
                ws_settings.set_rest_port(details['rest_port'])
                ws_settings.set_protocol(details['protocol'])

            logger.info('Bootstrap complete')
            logger.info('Manager is up at {0}'.format(manager_ip))
        except Exception as ex:
            tpe, value, traceback = sys.exc_info()
            logger.error('Bootstrap failed! ({0})'.format(str(ex)))
            if not keep_up:
                try:
                    bs.load_env(env_name)
                except IOError:
                    # the bootstrap exception occurred before the environment
                    # was even initialized - nothing to tear down
                    pass
                else:
                    logger.info(
                        'Executing teardown due to failed bootstrap...')
                    bs.teardown(name=env_name,
                                task_retries=5,
                                task_retry_interval=30,
                                task_thread_pool_size=1)
            raise tpe, value, traceback
def bootstrap(
    keep_up,
    validate_only,
    skip_validations,
    blueprint_path,
    inputs,
    install_plugins,
    task_retries,
    task_retry_interval,
    task_thread_pool_size,
):
    logger = get_logger()
    env_name = "manager"

    # Verify directory is initialized
    utils.get_context_path()

    # verifying no environment exists from a previous bootstrap
    try:
        bs.load_env(env_name)
    except IOError:
        # Environment is clean
        pass
    else:
        raise RuntimeError(
            "Can't bootstrap because the environment is not clean. Clean the "
            'environment by calling teardown or reset it using the "cfy init '
            '-r" command'
        )

    if not skip_validations:
        logger.info("Executing bootstrap validation...")
        bs.bootstrap_validation(
            blueprint_path,
            name=env_name,
            inputs=inputs,
            task_retries=task_retries,
            task_retry_interval=task_retry_interval,
            task_thread_pool_size=task_thread_pool_size,
            install_plugins=install_plugins,
            resolver=utils.get_import_resolver(),
        )
        logger.info("Bootstrap validation completed successfully")
    elif inputs:
        # The user expects that `--skip-validations` will also ignore
        # bootstrap validations and not only creation_validations
        inputs = common.add_ignore_bootstrap_validations_input(inputs)

    if not validate_only:
        try:
            logger.info("Executing manager bootstrap...")
            details = bs.bootstrap(
                blueprint_path,
                name=env_name,
                inputs=inputs,
                task_retries=task_retries,
                task_retry_interval=task_retry_interval,
                task_thread_pool_size=task_thread_pool_size,
                install_plugins=install_plugins,
            )

            manager_ip = details["manager_ip"]
            provider_context = details["provider_context"]
            with utils.update_wd_settings() as ws_settings:
                ws_settings.set_management_server(manager_ip)
                ws_settings.set_management_key(details["manager_key_path"])
                ws_settings.set_management_user(details["manager_user"])
                ws_settings.set_management_port(details["manager_port"])
                ws_settings.set_provider_context(provider_context)
                ws_settings.set_rest_port(details["rest_port"])
                ws_settings.set_rest_protocol(details["rest_protocol"])

            logger.info("Bootstrap complete")
            logger.info("Manager is up at {0}".format(manager_ip))
        except Exception as ex:
            tpe, value, traceback = sys.exc_info()
            logger.error("Bootstrap failed! ({0})".format(str(ex)))
            if not keep_up:
                try:
                    bs.load_env(env_name)
                except IOError:
                    # the bootstrap exception occurred before the environment
                    # was even initialized - nothing to tear down
                    pass
                else:
                    logger.info("Executing teardown due to failed bootstrap...")
                    bs.teardown(
                        name=env_name,
                        task_retries=5,
                        task_retry_interval=30,
                        task_thread_pool_size=1,
                    )
            raise tpe, value, traceback
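
# Sketch of the helper the `elif inputs:` branch above relies on. The real
# implementation lives in the `common` module imported by this file; the
# body below is an assumption about its shape, including the input key.
# The idea is to fold an extra input into the user's inputs so the manager
# blueprint itself skips its bootstrap-time validations.
def add_ignore_bootstrap_validations_input(inputs):
    inputs = dict(inputs or {})
    inputs["ignore_bootstrap_validations"] = True  # assumed input key
    return inputs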
def bootstrap(keep_up, validate_only, skip_validations, blueprint_path,
              inputs, install_plugins, task_retries, task_retry_interval,
              task_thread_pool_size):
    logger = get_logger()
    env_name = 'manager'

    # Verify directory is initialized
    utils.get_context_path()

    # verifying no environment exists from a previous bootstrap
    try:
        bs.load_env(env_name)
    except IOError:
        # Environment is clean
        pass
    else:
        raise RuntimeError(
            "Can't bootstrap because the environment is not clean. Clean the "
            'environment by calling teardown or reset it using the "cfy init '
            '-r" command')

    if not skip_validations:
        logger.info('executing bootstrap validation')
        bs.bootstrap_validation(blueprint_path,
                                name=env_name,
                                inputs=inputs,
                                task_retries=task_retries,
                                task_retry_interval=task_retry_interval,
                                task_thread_pool_size=task_thread_pool_size,
                                install_plugins=install_plugins,
                                resolver=utils.get_import_resolver())
        logger.info('bootstrap validation completed successfully')

    if not validate_only:
        try:
            logger.info('executing bootstrap')
            details = bs.bootstrap(blueprint_path,
                                   name=env_name,
                                   inputs=inputs,
                                   task_retries=task_retries,
                                   task_retry_interval=task_retry_interval,
                                   task_thread_pool_size=task_thread_pool_size,
                                   install_plugins=install_plugins)

            manager_ip = details['manager_ip']
            provider_context = details['provider_context']
            with utils.update_wd_settings() as ws_settings:
                ws_settings.set_management_server(manager_ip)
                ws_settings.set_management_key(details['manager_key_path'])
                ws_settings.set_management_user(details['manager_user'])
                ws_settings.set_provider_context(provider_context)
                ws_settings.set_rest_port(details['rest_port'])
                ws_settings.set_protocol(details['protocol'])

            logger.info('bootstrapping complete')
            logger.info('management server is up at {0}'.format(manager_ip))
        except Exception as ex:
            tpe, value, traceback = sys.exc_info()
            logger.error('bootstrap failed! ({0})'.format(str(ex)))
            if not keep_up:
                try:
                    bs.load_env(env_name)
                except IOError:
                    # the bootstrap exception occurred before the environment
                    # was even initialized - nothing to tear down
                    pass
                else:
                    logger.info('executing teardown due to failed bootstrap')
                    bs.teardown(name=env_name,
                                task_retries=5,
                                task_retry_interval=30,
                                task_thread_pool_size=1)
            raise tpe, value, traceback
def bootstrap(config_file_path, keep_up, validate_only, skip_validations,
              blueprint_path, inputs, install_plugins, task_retries,
              task_retry_interval, task_thread_pool_size):
    logger = get_logger()
    settings = utils.load_cloudify_working_dir_settings()
    if settings.get_is_provider_config():
        if blueprint_path or inputs:
            raise ValueError(
                'the "blueprint_path" and "inputs" parameters '
                'are not to be used with the deprecated provider API')
        return provider_common.provider_bootstrap(config_file_path,
                                                  keep_up,
                                                  validate_only,
                                                  skip_validations)

    env_name = 'manager'

    # verifying no environment exists from a previous bootstrap
    try:
        bs.load_env(env_name)
    except IOError:
        # Environment is clean
        pass
    else:
        raise RuntimeError(
            "Can't bootstrap because the environment is not clean. Clean the "
            'environment by calling teardown or reset it using the "cfy init '
            '-r" command')

    if not skip_validations:
        logger.info('executing bootstrap validation')
        bs.bootstrap_validation(blueprint_path,
                                name=env_name,
                                inputs=inputs,
                                task_retries=task_retries,
                                task_retry_interval=task_retry_interval,
                                task_thread_pool_size=task_thread_pool_size,
                                install_plugins=install_plugins)
        logger.info('bootstrap validation completed successfully')

    if not validate_only:
        try:
            logger.info('executing bootstrap')
            details = bs.bootstrap(blueprint_path,
                                   name=env_name,
                                   inputs=inputs,
                                   task_retries=task_retries,
                                   task_retry_interval=task_retry_interval,
                                   task_thread_pool_size=task_thread_pool_size,
                                   install_plugins=install_plugins)

            manager_ip = details['manager_ip']
            provider_name = details['provider_name']
            provider_context = details['provider_context']
            with utils.update_wd_settings() as ws_settings:
                ws_settings.set_management_server(manager_ip)
                ws_settings.set_management_key(details['manager_key_path'])
                ws_settings.set_management_user(details['manager_user'])
                ws_settings.set_provider(provider_name)
                ws_settings.set_provider_context(provider_context)

            logger.info('bootstrapping complete')
            logger.info('management server is up at {0}'.format(manager_ip))
        except Exception:
            tpe, value, traceback = sys.exc_info()
            logger.error('bootstrap failed!')
            if not keep_up:
                try:
                    bs.load_env(env_name)
                except IOError:
                    # the bootstrap exception occurred before the environment
                    # was even initialized - nothing to tear down
                    pass
                else:
                    logger.info('executing teardown due to failed bootstrap')
                    bs.teardown(name=env_name,
                                task_retries=5,
                                task_retry_interval=30,
                                task_thread_pool_size=1)
            raise tpe, value, traceback
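
# Note on the three-argument `raise tpe, value, traceback` used above: it is
# the Python 2 idiom for re-raising the original exception with its original
# traceback after the best-effort teardown. A Python 3 port (sketch) would
# capture and re-raise like this instead:
#
#   exc_type, exc_value, exc_tb = sys.exc_info()
#   ...
#   raise exc_value.with_traceback(exc_tb)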