def teardown(force, ignore_deployments):
    """Tear down the manager environment.

    :param force: forwarded to ``_validate_force`` before anything else.
    :param ignore_deployments: when truthy, skip the running-deployments
        safety check before tearing the manager down.
    :raises exceptions.CloudifyCliError: when executed from a directory
        that holds neither a management ip nor a loadable bootstrap env.
    """
    _validate_force(force)
    try:
        management_ip = utils.get_management_server_ip()
    except exceptions.CloudifyCliError:
        # management ip does not exist in the local context
        # this can mean one of two things:
        # 1. bootstrap was unsuccessful
        # 2. we are in the wrong directory
        try:
            bs.load_env()
            # this means we are probably in the right directory
            # which means the teardown was unsuccessful, try to teardown
            # anyway
        except Exception:
            # FIX: narrowed from BaseException so KeyboardInterrupt and
            # SystemExit propagate instead of being masked as a
            # wrong-directory error.
            # this means we are in the wrong directory, have the user
            # execute the 'use' command to retrieve manager deployment,
            # because other wise we cannot bootstrap from here. If the
            # manager is down, the user must return to the original
            # directory in order to teardown
            raise exceptions.CloudifyCliError(
                "You are attempting to execute 'teardown' from an "
                "invalid directory. Please execute 'cfy use' before "
                "running this command. If the management server is "
                "unavailable, you must execute this command from the "
                "directory you initially bootstrapped from, or from the last "
                "directory a 'cfy use' command was executed on this manager."
            )
        else:
            _do_teardown()
    else:
        # make sure we don't teardown the manager if there are running
        # deployments, unless the user explicitly specified it.
        _validate_deployments(ignore_deployments, management_ip)
        # update local provider context since the server id might have
        # changed in case it has gone through a recovery process.
        _update_local_provider_context(management_ip)
        # execute teardown
        _do_teardown()
def teardown(force, ignore_deployments):
    """Tear down the manager environment.

    ``force`` is checked by ``_validate_force`` up front. When the local
    context holds no management ip, the function loads the local
    bootstrap env to decide whether we are in the right directory;
    otherwise it validates deployments, refreshes the local provider
    context and runs the teardown.
    """
    _validate_force(force)
    try:
        management_ip = utils.get_management_server_ip()
    except exceptions.CloudifyCliError:
        # management ip does not exist in the local context
        # this can mean one of two things:
        # 1. bootstrap was unsuccessful
        # 2. we are in the wrong directory
        try:
            bs.load_env()
            # this means we are probably in the right directory
            # which means the teardown was unsuccessful, try to teardown
            # anyway
        except Exception:
            # FIX: narrowed from BaseException — KeyboardInterrupt and
            # SystemExit must not be converted into a CloudifyCliError.
            # this means we are in the wrong directory, have the user
            # execute the 'use' command to retrieve manager deployment,
            # because other wise we cannot bootstrap from here. If the
            # manager is down, the user must return to the original
            # directory in order to teardown
            raise exceptions.CloudifyCliError(
                "You are attempting to execute 'teardown' from an "
                "invalid directory. Please execute 'cfy use' before "
                "running this command. If the management server is "
                "unavailable, you must execute this command from the "
                "directory you initially bootstrapped from, or from the last "
                "directory a 'cfy use' command was executed on this manager.")
        else:
            _do_teardown()
    else:
        # make sure we don't teardown the manager if there are running
        # deployments, unless the user explicitly specified it.
        _validate_deployments(ignore_deployments, management_ip)
        # update local provider context since the server id might have
        # changed in case it has gone through a recovery process.
        _update_local_provider_context(management_ip)
        # execute teardown
        _do_teardown()
def _dump_manager_deployment():
    """Dump the local 'manager' bootstrap environment.

    Writes the manager node instance id into the env's payload and
    flushes the runtime properties to local storage before producing
    the dump. Ordering matters: both writes must land before
    ``dump_manager_deployment`` reads local storage.
    """
    from cloudify_cli.bootstrap.bootstrap import dump_manager_deployment
    from cloudify_cli.bootstrap.bootstrap import load_env

    # explicitly write the manager node instance id to local storage
    env = load_env('manager')
    with env.storage.payload() as payload:
        payload['manager_node_instance_id'] = ctx.instance.id
    # explicitly flush runtime properties to local storage
    ctx.instance.update()
    return dump_manager_deployment()
def bootstrap(keep_up, validate_only, skip_validations, blueprint_path,
              inputs, install_plugins, task_retries, task_retry_interval,
              task_thread_pool_size):
    """Bootstrap a Cloudify manager from *blueprint_path*.

    Refuses to run when a previous 'manager' env already exists,
    optionally runs bootstrap validation, then executes the bootstrap
    workflow and persists the resulting connection details in the
    working-directory settings. On failure, the partially created
    environment is torn down unless *keep_up* is truthy.
    """
    logger = get_logger()
    env_name = 'manager'
    # Verify directory is initialized
    utils.get_context_path()
    # verifying no environment exists from a previous bootstrap
    try:
        bs.load_env(env_name)
    except IOError:
        # Environment is clean
        pass
    else:
        raise RuntimeError(
            "Can't bootstrap because the environment is not clean. Clean the "
            'environment by calling teardown or reset it using the "cfy init '
            '-r" command')
    if not skip_validations:
        logger.info('Executing bootstrap validation...')
        bs.bootstrap_validation(
            blueprint_path,
            name=env_name,
            inputs=inputs,
            task_retries=task_retries,
            task_retry_interval=task_retry_interval,
            task_thread_pool_size=task_thread_pool_size,
            install_plugins=install_plugins,
            resolver=utils.get_import_resolver())
        logger.info('Bootstrap validation completed successfully')
    if not validate_only:
        try:
            logger.info('Executing manager bootstrap...')
            details = bs.bootstrap(
                blueprint_path,
                name=env_name,
                inputs=inputs,
                task_retries=task_retries,
                task_retry_interval=task_retry_interval,
                task_thread_pool_size=task_thread_pool_size,
                install_plugins=install_plugins)
            manager_ip = details['manager_ip']
            provider_context = details['provider_context']
            # persist the new manager's connection details so subsequent
            # cfy commands target it
            with utils.update_wd_settings() as ws_settings:
                ws_settings.set_management_server(manager_ip)
                ws_settings.set_management_key(details['manager_key_path'])
                ws_settings.set_management_user(details['manager_user'])
                ws_settings.set_provider_context(provider_context)
                ws_settings.set_rest_port(details['rest_port'])
                ws_settings.set_protocol(details['protocol'])
            logger.info('Bootstrap complete')
            logger.info('Manager is up at {0}'.format(manager_ip))
        except Exception as ex:
            # capture exc_info first so the original traceback survives
            # the teardown attempt below
            tpe, value, traceback = sys.exc_info()
            logger.error('Bootstrap failed! ({0})'.format(str(ex)))
            if not keep_up:
                try:
                    bs.load_env(env_name)
                except IOError:
                    # the bootstrap exception occurred before environment was
                    # even initialized - nothing to teardown.
                    pass
                else:
                    logger.info(
                        'Executing teardown due to failed bootstrap...')
                    bs.teardown(name=env_name,
                                task_retries=5,
                                task_retry_interval=30,
                                task_thread_pool_size=1)
            # Python 2 three-expression raise: re-raise the original
            # exception with its original traceback
            raise tpe, value, traceback
def bootstrap(
    keep_up,
    validate_only,
    skip_validations,
    blueprint_path,
    inputs,
    install_plugins,
    task_retries,
    task_retry_interval,
    task_thread_pool_size,
):
    """Bootstrap a Cloudify manager from *blueprint_path*.

    Refuses to run when a previous 'manager' env already exists,
    optionally runs bootstrap validation (or injects an input that skips
    it), then executes the bootstrap workflow and persists the resulting
    connection details in the working-directory settings. On failure,
    the partially created environment is torn down unless *keep_up* is
    truthy.
    """
    logger = get_logger()
    env_name = "manager"
    # Verify directory is initialized
    utils.get_context_path()
    # verifying no environment exists from a previous bootstrap
    try:
        bs.load_env(env_name)
    except IOError:
        # Environment is clean
        pass
    else:
        raise RuntimeError(
            "Can't bootstrap because the environment is not clean. Clean the "
            'environment by calling teardown or reset it using the "cfy init '
            '-r" command'
        )
    if not skip_validations:
        logger.info("Executing bootstrap validation...")
        bs.bootstrap_validation(
            blueprint_path,
            name=env_name,
            inputs=inputs,
            task_retries=task_retries,
            task_retry_interval=task_retry_interval,
            task_thread_pool_size=task_thread_pool_size,
            install_plugins=install_plugins,
            resolver=utils.get_import_resolver(),
        )
        logger.info("Bootstrap validation completed successfully")
    elif inputs:
        # The user expects that `--skip-validations` will also ignore
        # bootstrap validations and not only creation_validations
        inputs = common.add_ignore_bootstrap_validations_input(inputs)
    if not validate_only:
        try:
            logger.info("Executing manager bootstrap...")
            details = bs.bootstrap(
                blueprint_path,
                name=env_name,
                inputs=inputs,
                task_retries=task_retries,
                task_retry_interval=task_retry_interval,
                task_thread_pool_size=task_thread_pool_size,
                install_plugins=install_plugins,
            )
            manager_ip = details["manager_ip"]
            provider_context = details["provider_context"]
            # persist the new manager's connection details so subsequent
            # cfy commands target it
            with utils.update_wd_settings() as ws_settings:
                ws_settings.set_management_server(manager_ip)
                ws_settings.set_management_key(details["manager_key_path"])
                ws_settings.set_management_user(details["manager_user"])
                ws_settings.set_management_port(details["manager_port"])
                ws_settings.set_provider_context(provider_context)
                ws_settings.set_rest_port(details["rest_port"])
                ws_settings.set_rest_protocol(details["rest_protocol"])
            logger.info("Bootstrap complete")
            logger.info("Manager is up at {0}".format(manager_ip))
        except Exception as ex:
            # capture exc_info first so the original traceback survives
            # the teardown attempt below
            tpe, value, traceback = sys.exc_info()
            logger.error("Bootstrap failed! ({0})".format(str(ex)))
            if not keep_up:
                try:
                    bs.load_env(env_name)
                except IOError:
                    # the bootstrap exception occurred before environment was
                    # even initialized - nothing to teardown.
                    pass
                else:
                    logger.info("Executing teardown due to failed bootstrap...")
                    bs.teardown(name=env_name,
                                task_retries=5,
                                task_retry_interval=30,
                                task_thread_pool_size=1)
            # Python 2 three-expression raise: re-raise the original
            # exception with its original traceback
            raise tpe, value, traceback
def _load_private_ip(inputs): try: return inputs['private_ip'] or load_env().outputs()['private_ip'] except Exception: raise exceptions.CloudifyCliError('Private IP must be provided for ' 'the upgrade/rollback process')
# Standalone maintenance script (Python 2): record the manager key path
# given on the command line and recover the manager user name from the
# stored manager deployment.
#
# Usage: <script> <path-to-manager-key>
import sys

from cloudify_cli import utils
from cloudify_cli.bootstrap import bootstrap as bs
from cloudify_cli.bootstrap import tasks as bstasks

with utils.update_wd_settings() as settings:
    # key path is taken verbatim from the command line
    settings.set_management_key(sys.argv[1])
    print 'Manager key set to path: ' + sys.argv[1]
    provider_context = settings.get_provider_context()
    # materialize the manager deployment dump (if any is stored in the
    # provider context) so the local 'manager' env can be loaded below
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment')
    )
    env = bs.load_env('manager')
    storage = env.storage
    # first node instance carrying the manager-user runtime property wins
    for instance in storage.get_node_instances():
        manager_user = instance.runtime_properties.get(
            bstasks.MANAGER_USER_RUNTIME_PROPERTY
        )
        if manager_user:
            settings.set_management_user(manager_user)
            print 'Manager user set to: ' + manager_user
            break
def bootstrap(keep_up, validate_only, skip_validations, blueprint_path,
              inputs, install_plugins, task_retries, task_retry_interval,
              task_thread_pool_size):
    """Bootstrap a Cloudify manager from *blueprint_path*.

    Refuses to run when a previous 'manager' env already exists,
    optionally runs bootstrap validation, then executes the bootstrap
    workflow and persists the resulting connection details in the
    working-directory settings. On failure, the partially created
    environment is torn down unless *keep_up* is truthy.
    """
    logger = get_logger()
    env_name = 'manager'
    # Verify directory is initialized
    utils.get_context_path()
    # verifying no environment exists from a previous bootstrap
    try:
        bs.load_env(env_name)
    except IOError:
        # Environment is clean
        pass
    else:
        raise RuntimeError(
            "Can't bootstrap because the environment is not clean. Clean the "
            'environment by calling teardown or reset it using the "cfy init '
            '-r" command')
    if not skip_validations:
        logger.info('executing bootstrap validation')
        bs.bootstrap_validation(blueprint_path,
                                name=env_name,
                                inputs=inputs,
                                task_retries=task_retries,
                                task_retry_interval=task_retry_interval,
                                task_thread_pool_size=task_thread_pool_size,
                                install_plugins=install_plugins,
                                resolver=utils.get_import_resolver())
        logger.info('bootstrap validation completed successfully')
    if not validate_only:
        try:
            logger.info('executing bootstrap')
            details = bs.bootstrap(blueprint_path,
                                   name=env_name,
                                   inputs=inputs,
                                   task_retries=task_retries,
                                   task_retry_interval=task_retry_interval,
                                   task_thread_pool_size=task_thread_pool_size,
                                   install_plugins=install_plugins)
            manager_ip = details['manager_ip']
            provider_context = details['provider_context']
            # persist the new manager's connection details so subsequent
            # cfy commands target it
            with utils.update_wd_settings() as ws_settings:
                ws_settings.set_management_server(manager_ip)
                ws_settings.set_management_key(details['manager_key_path'])
                ws_settings.set_management_user(details['manager_user'])
                ws_settings.set_provider_context(provider_context)
                ws_settings.set_rest_port(details['rest_port'])
                ws_settings.set_protocol(details['protocol'])
            logger.info('bootstrapping complete')
            logger.info('management server is up at {0}'.format(manager_ip))
        except Exception as ex:
            # capture exc_info first so the original traceback survives
            # the teardown attempt below
            tpe, value, traceback = sys.exc_info()
            logger.error('bootstrap failed! ({0})'.format(str(ex)))
            if not keep_up:
                try:
                    bs.load_env(env_name)
                except IOError:
                    # the bootstrap exception occurred before environment was
                    # even initialized - nothing to teardown.
                    pass
                else:
                    logger.info('executing teardown due to failed bootstrap')
                    bs.teardown(name=env_name,
                                task_retries=5,
                                task_retry_interval=30,
                                task_thread_pool_size=1)
            # Python 2 three-expression raise: re-raise the original
            # exception with its original traceback
            raise tpe, value, traceback
def let_user_supplement_credentials(settings):
    """Interactively ask the user for the manager key path and user name
    and store both in *settings*.

    NOTE(review): relies on a global ``manager_version`` that is defined
    elsewhere in this script — confirm it is set before this runs.
    """
    print 'The script is not able to perform "cfy ssh" and is not able to fix it automatically..'
    manager_key = raw_input("Please provide a path to the %s manager's key: " % manager_version)
    settings.set_management_key(manager_key)
    manager_user = raw_input("Please provide the %s manager's user name: " % manager_version)
    settings.set_management_user(manager_user)


# Recover missing ssh credentials: first try the runtime properties of
# the stored manager deployment, then fall back to prompting the user.
with utils.update_wd_settings() as settings:
    if settings.get_management_key() is None or settings.get_management_user() is None:
        provider_context = settings.get_provider_context()
        # materialize the manager deployment dump (if any) so the local
        # env can be loaded below
        bs.read_manager_deployment_dump_if_needed(
            provider_context.get('cloudify', {}).get('manager_deployment')
        )
        env = bs.load_env('manager')  # literal string here, no way to improve it now..
        storage = env.storage
        for instance in storage.get_node_instances():
            manager_key = instance.runtime_properties.get(bstasks.MANAGER_KEY_PATH_RUNTIME_PROPERTY)
            if manager_key:
                settings.set_management_key(manager_key)
            manager_user = instance.runtime_properties.get(bstasks.MANAGER_USER_RUNTIME_PROPERTY)
            if manager_user:
                settings.set_management_user(manager_user)
            if manager_user or manager_key:
                # one credential found on this instance; prompt for
                # whichever one is still missing, then stop scanning
                if not manager_user or not manager_key:
                    let_user_supplement_credentials(settings)
                break
        else:
            # for-else: no instance carried either credential — fall
            # back to prompting for both
            let_user_supplement_credentials(settings)
# Standalone maintenance script (Python 2): record the manager key path
# given on the command line and recover the manager user name from the
# stored manager deployment.
#
# Usage: <script> <path-to-manager-key>
import sys

from cloudify_cli import utils
from cloudify_cli.bootstrap import bootstrap as bs
from cloudify_cli.bootstrap import tasks as bstasks

with utils.update_wd_settings() as settings:
    # key path is taken verbatim from the command line
    settings.set_management_key(sys.argv[1])
    print 'Manager key set to path: ' + sys.argv[1]
    provider_context = settings.get_provider_context()
    # materialize the manager deployment dump (if any is stored in the
    # provider context) so the local 'manager' env can be loaded below
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    env = bs.load_env('manager')
    storage = env.storage
    # first node instance carrying the manager-user runtime property wins
    for instance in storage.get_node_instances():
        manager_user = instance.runtime_properties.get(
            bstasks.MANAGER_USER_RUNTIME_PROPERTY)
        if manager_user:
            settings.set_management_user(manager_user)
            print 'Manager user set to: ' + manager_user
            break
def bootstrap(config_file_path, keep_up, validate_only, skip_validations,
              blueprint_path, inputs, install_plugins, task_retries,
              task_retry_interval, task_thread_pool_size):
    """Bootstrap a Cloudify manager.

    Delegates to the deprecated provider API when the working directory
    is configured for it; otherwise refuses to run when a previous
    'manager' env already exists, optionally runs bootstrap validation,
    executes the bootstrap workflow and persists the resulting
    connection details. On failure, the partially created environment
    is torn down unless *keep_up* is truthy.
    """
    logger = get_logger()
    settings = utils.load_cloudify_working_dir_settings()
    if settings.get_is_provider_config():
        # legacy provider-based bootstrap path
        if blueprint_path or inputs:
            raise ValueError(
                'the "blueprint_path" and "inputs" parameters '
                'are not to be used with the deprecated provider API')
        return provider_common.provider_bootstrap(config_file_path,
                                                  keep_up,
                                                  validate_only,
                                                  skip_validations)
    env_name = 'manager'
    # verifying no environment exists from a previous bootstrap
    try:
        bs.load_env(env_name)
    except IOError:
        # Environment is clean
        pass
    else:
        raise RuntimeError(
            "Can't bootstrap because the environment is not clean. Clean the "
            'environment by calling teardown or reset it using the "cfy init '
            '-r" command')
    if not skip_validations:
        logger.info('executing bootstrap validation')
        bs.bootstrap_validation(blueprint_path,
                                name=env_name,
                                inputs=inputs,
                                task_retries=task_retries,
                                task_retry_interval=task_retry_interval,
                                task_thread_pool_size=task_thread_pool_size,
                                install_plugins=install_plugins)
        logger.info('bootstrap validation completed successfully')
    if not validate_only:
        try:
            logger.info('executing bootstrap')
            details = bs.bootstrap(blueprint_path,
                                   name=env_name,
                                   inputs=inputs,
                                   task_retries=task_retries,
                                   task_retry_interval=task_retry_interval,
                                   task_thread_pool_size=task_thread_pool_size,
                                   install_plugins=install_plugins)
            manager_ip = details['manager_ip']
            provider_name = details['provider_name']
            provider_context = details['provider_context']
            # persist the new manager's connection details so subsequent
            # cfy commands target it
            with utils.update_wd_settings() as ws_settings:
                ws_settings.set_management_server(manager_ip)
                ws_settings.set_management_key(details['manager_key_path'])
                ws_settings.set_management_user(details['manager_user'])
                ws_settings.set_provider(provider_name)
                ws_settings.set_provider_context(provider_context)
            logger.info('bootstrapping complete')
            logger.info('management server is up at {0}'.format(manager_ip))
        except Exception:
            # capture exc_info first so the original traceback survives
            # the teardown attempt below
            tpe, value, traceback = sys.exc_info()
            logger.error('bootstrap failed!')
            if not keep_up:
                try:
                    bs.load_env(env_name)
                except IOError:
                    # the bootstrap exception occurred before environment was
                    # even initialized - nothing to teardown.
                    pass
                else:
                    logger.info('executing teardown due to failed bootstrap')
                    bs.teardown(name=env_name,
                                task_retries=5,
                                task_retry_interval=30,
                                task_thread_pool_size=1)
            # Python 2 three-expression raise: re-raise the original
            # exception with its original traceback
            raise tpe, value, traceback