def get_file_from_manager(remote_source_path, destination_path):
    """Download a file from the manager host over SSH using fabric.

    :param remote_source_path: path of the file on the manager.
    :param destination_path: local path to download to.
    """
    manager_key = os.path.expanduser(utils.get_management_key())
    fabric_env = fab.settings(
        fab.hide('running', 'stdout'),
        host_string=utils.build_manager_host_string(),
        key_filename=manager_key,
    )
    with fabric_env:
        fab.get(remote_source_path, destination_path)
def recover(force, task_retries, task_retry_interval, task_thread_pool_size):
    """Recover the manager deployment from its stored deployment dump.

    Refuses to run unless ``force`` is set, and verifies that the manager
    private key file is reachable before invoking the recovery workflow.
    """
    logger = get_logger()
    if not force:
        raise exceptions.CloudifyCliError(
            "This action requires additional "
            "confirmation. Add the '-f' or '--force' "
            "flags to your command if you are certain "
            "this command should be executed.")

    if CLOUDIFY_MANAGER_PK_PATH_ENVAR in os.environ:
        # The key file path was supplied through the environment.
        # Validate its existence now because a fabric task will later
        # use it to ssh to the manager.
        env_key_path = os.path.expanduser(
            os.environ[CLOUDIFY_MANAGER_PK_PATH_ENVAR])
        if not os.path.isfile(env_key_path):
            raise exceptions.CloudifyValidationError(
                "Cannot perform recovery. manager private key file "
                "defined in {0} environment variable does not "
                "exist: {1}".format(CLOUDIFY_MANAGER_PK_PATH_ENVAR,
                                    env_key_path))
    else:
        # Fall back to the key path recorded in the local working
        # directory context (recovery running from the bootstrap dir).
        try:
            context_key_path = os.path.expanduser(utils.get_management_key())
            if not os.path.isfile(context_key_path):
                # The context knows a path but the file itself is gone.
                raise exceptions.CloudifyValidationError(
                    "Cannot perform recovery. manager key file does not "
                    "exist: {0}. Set the manager private key path via the {1} "
                    "environment variable"
                    .format(context_key_path, CLOUDIFY_MANAGER_PK_PATH_ENVAR))
        except exceptions.CloudifyCliError:
            # No key path in the context at all - recovery was started
            # from a directory other than the bootstrap one, so the
            # environment variable is the only way to provide the key.
            raise exceptions.CloudifyValidationError(
                "Cannot perform recovery. manager key file not found. Set "
                "the manager private key path via the {0} environment "
                "variable".format(CLOUDIFY_MANAGER_PK_PATH_ENVAR))

    logger.info('Recovering manager deployment')
    wd_settings = utils.load_cloudify_working_dir_settings()
    provider_context = wd_settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.recover(task_retries=task_retries,
               task_retry_interval=task_retry_interval,
               task_thread_pool_size=task_thread_pool_size)
    logger.info('Successfully recovered manager deployment')
def ssh(ssh_plain_mode, ssh_command):
    """Open an SSH session to the manager (optionally running a command).

    :param ssh_plain_mode: when True, omit the ``-i <key>`` argument.
    :param ssh_command: optional command to run over the connection.
    """
    logger = get_logger()
    ssh_path = spawn.find_executable('ssh')
    logger.debug('SSH executable path: {0}'.format(ssh_path or 'Not found'))
    if not ssh_path and platform.system() == 'Windows':
        raise CloudifyCliError(messages.SSH_WIN_NOT_FOUND)
    elif not ssh_path:
        raise CloudifyCliError(messages.SSH_LINUX_NOT_FOUND)
    else:
        host = '{0}@{1}'.format(get_management_user(),
                                get_management_server_ip())
        command = [ssh_path, host]
        if get_global_verbosity():
            command.append('-v')
        if not ssh_plain_mode:
            command += ['-i', os.path.expanduser(get_management_key())]
        if ssh_command:
            command += ['--', ssh_command]
        logger.debug('executing command: {0}'.format(' '.join(command)))
        logger.info('Trying to connect...')
        from subprocess import call
        call(command)
def recover(force, task_retries, task_retry_interval, task_thread_pool_size):
    """Recover the manager deployment; requires explicit ``--force``."""
    logger = get_logger()
    if not force:
        raise exceptions.CloudifyCliError(
            "This action requires additional "
            "confirmation. Add the '-f' or '--force' "
            "flags to your command if you are certain "
            "this command should be executed.")
    if CLOUDIFY_MANAGER_PK_PATH_ENVAR not in os.environ:
        # No env override - the key recorded in the local context must
        # exist on disk, since fabric will later use it to ssh in.
        context_key = os.path.expanduser(utils.get_management_key())
        if not os.path.isfile(context_key):
            raise RuntimeError("Can't find manager private key file. Set the "
                               "path to it using the {0} environment variable"
                               .format(CLOUDIFY_MANAGER_PK_PATH_ENVAR))
    logger.info('Recovering manager deployment')
    wd_settings = utils.load_cloudify_working_dir_settings()
    provider_context = wd_settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.recover(task_retries=task_retries,
               task_retry_interval=task_retry_interval,
               task_thread_pool_size=task_thread_pool_size)
    logger.info('Successfully recovered manager deployment')
def _load_management_key(inputs):
    """Return the expanded manager SSH key path.

    Prefers ``inputs['ssh_key_filename']``; falls back to the key stored
    in the local context. Any failure (missing input key, no context key,
    unusable value) is reported as a single CLI error.
    """
    try:
        return os.path.expanduser(
            inputs['ssh_key_filename'] or utils.get_management_key())
    except Exception:
        raise exceptions.CloudifyCliError('Manager key must be provided for '
                                          'the upgrade/rollback process')
def dev(args, task, tasks_file):
    """Execute a dev task from ``tasks_file`` against the manager host."""
    _execute(username=get_management_user(),
             key=get_management_key(),
             ip=utils.get_management_server_ip(),
             task=task,
             tasks_file=tasks_file,
             args=args)
def _open_interactive_shell(host_string, command=''):
    """Open an interactive SSH shell by invoking the ssh binary directly.

    Used instead of fabric's ``open_shell=True``, which doesn't work well
    (disfigures coloring and such...).
    """
    key_path = os.path.expanduser(utils.get_management_key())
    ssh_cmd = ['ssh', '-t', host_string, '-i', key_path]
    if command:
        ssh_cmd.append(command)
    subprocess.call(ssh_cmd)
def _validate_recovery_key():
    """Ensure the manager private key file is reachable before recovery.

    The key path is taken from the CLOUDIFY_MANAGER_PK_PATH_ENVAR
    environment variable when set, otherwise from the local working
    directory context.

    :raises exceptions.CloudifyValidationError: if the key file cannot
        be located.
    """
    if CLOUDIFY_MANAGER_PK_PATH_ENVAR in os.environ:
        # user defined the key file path inside an env variable.
        # validate the existence of the keyfile because it will later be
        # used in a fabric task to ssh to the manager
        key_path = os.path.expanduser(
            os.environ[CLOUDIFY_MANAGER_PK_PATH_ENVAR])
        if not os.path.isfile(key_path):
            raise exceptions.CloudifyValidationError(
                "Cannot perform recovery. manager private key file "
                "defined in {0} environment variable does not "
                "exist: {1}".format(CLOUDIFY_MANAGER_PK_PATH_ENVAR,
                                    key_path))
    else:
        # try retrieving the key file from the local context
        try:
            key_path = os.path.expanduser(utils.get_management_key())
            if not os.path.isfile(key_path):
                # manager key file path exists in context but does not
                # exist in the file system. fail now.
                raise exceptions.CloudifyValidationError(
                    "Cannot perform recovery. manager key file does not "
                    "exist: {0}. Set the manager private key path via the {1} "
                    "environment variable".format(
                        key_path, CLOUDIFY_MANAGER_PK_PATH_ENVAR))
            # in this case, the recovery is executed from the same directory
            # that the bootstrap was executed from. we should not have
            # problems
        except exceptions.CloudifyCliError:
            # manager key file path does not exist in the context. this
            # means the recovery is executed from a different directory than
            # the bootstrap one. in this case the user must set the
            # environment variable to continue.
            raise exceptions.CloudifyValidationError(
                "Cannot perform recovery. manager key file not found. Set "
                "the manager private key path via the {0} environment "
                "variable".format(CLOUDIFY_MANAGER_PK_PATH_ENVAR))


def recover(force, task_retries, task_retry_interval, task_thread_pool_size):
    """Recover the manager deployment from its stored deployment dump.

    :param force: must be True; guards against accidental execution.
    :param task_retries: forwarded to the recovery workflow.
    :param task_retry_interval: forwarded to the recovery workflow.
    :param task_thread_pool_size: forwarded to the recovery workflow.
    :raises exceptions.CloudifyCliError: when ``force`` is not set.
    :raises exceptions.CloudifyValidationError: when the manager key file
        cannot be located.
    """
    logger = get_logger()
    if not force:
        msg = ("This action requires additional "
               "confirmation. Add the '-f' or '--force' "
               "flags to your command if you are certain "
               "this command should be executed.")
        raise exceptions.CloudifyCliError(msg)

    # Key validation is separated out so the recovery flow below reads
    # as a straight sequence of steps.
    _validate_recovery_key()

    logger.info('Recovering manager deployment')
    settings = utils.load_cloudify_working_dir_settings()
    provider_context = settings.get_provider_context()
    bs.read_manager_deployment_dump_if_needed(
        provider_context.get('cloudify', {}).get('manager_deployment'))
    bs.recover(task_retries=task_retries,
               task_retry_interval=task_retry_interval,
               task_thread_pool_size=task_thread_pool_size)
    logger.info('Successfully recovered manager deployment')
def scp(local_path, path_on_manager, to_manager):
    """Copy a file between the local machine and the manager via scp.

    :param local_path: path on the local machine.
    :param path_on_manager: path on the manager host.
    :param to_manager: when True copy local -> manager, otherwise
        manager -> local.
    :raises RuntimeError: if no scp executable is found on PATH.
    """
    scp_path = spawn.find_executable('scp')
    # find_executable returns None when scp is missing; fail with a clear
    # message instead of crashing later with a TypeError inside call().
    if not scp_path:
        raise RuntimeError('scp executable not found on PATH')
    management_path = '{0}@{1}:{2}'.format(get_management_user(),
                                           get_management_server_ip(),
                                           path_on_manager)
    command = [scp_path, '-i', os.path.expanduser(get_management_key())]
    if to_manager:
        command += [local_path, management_path]
    else:
        command += [management_path, local_path]
    call(command)
def scp(local_path, path_on_manager, to_manager):
    """Copy a file to or from the manager host using the scp binary.

    :param to_manager: True copies local -> manager; False the reverse.
    """
    scp_path = spawn.find_executable('scp')
    remote = '{0}@{1}:{2}'.format(get_management_user(),
                                  get_management_server_ip(),
                                  path_on_manager)
    command = [scp_path, '-i', os.path.expanduser(get_management_key())]
    command += [local_path, remote] if to_manager else [remote, local_path]
    call(command)
def put_file_in_manager(source_path, remote_source_path, use_sudo=True,
                        key_filename=None, user=None):
    """Upload a local file to the manager host over SSH via fabric.

    :param source_path: local file to upload.
    :param remote_source_path: destination path on the manager.
    :param use_sudo: upload with sudo on the remote side.
    :param key_filename: SSH key to use; defaults to the context key.
    :param user: remote user; defaults to the context user.
    """
    ssh_key = key_filename or os.path.expanduser(utils.get_management_key())
    with fab.settings(fab.hide('running', 'stdout'),
                      host_string=utils.build_manager_host_string(user=user),
                      key_filename=ssh_key):
        fab.put(local_path=source_path,
                remote_path=remote_source_path,
                use_sudo=use_sudo)
def execute():
    """Run the enclosing scope's ``command`` on the manager over SSH.

    NOTE: this is a closure; it reads ``command`` and ``use_sudo`` from
    the enclosing scope.

    :return: the fabric result object of the executed command.
    :raises CloudifyCliError: if the remote command fails.
    """
    key_filename = os.path.expanduser(utils.get_management_key())
    with fab.settings(
            host_string=utils.build_manager_host_string(),
            key_filename=key_filename,
            warn_only=True):
        if use_sudo:
            result = fab.sudo(command)
        else:
            result = fab.run(command)
        if result.failed:
            # fabric exposes the executed command as `real_command`;
            # the original `read_command` attribute does not exist and
            # raised AttributeError while reporting the failure.
            raise CloudifyCliError(
                'Failed to execute: {0} ({1})'.format(
                    result.real_command, result.stderr))
        return result
def execute():
    """Run the enclosing scope's ``command`` on ``host_string`` over SSH.

    NOTE: this is a closure; it reads ``host_string``, ``command``,
    ``use_sudo`` and ``open_shell`` from the enclosing scope. Returns the
    fabric output object, or None when an interactive shell was opened.
    """
    ssh_key = os.path.expanduser(utils.get_management_key())
    with fab.settings(host_string=host_string,
                      key_filename=ssh_key,
                      warn_only=True):
        # sudo takes priority over opening a shell, matching the
        # original branch ordering.
        if not use_sudo and open_shell:
            fab.open_shell(command)
            return None
        runner = fab.sudo if use_sudo else fab.run
        output = runner(command)
        if output.failed:
            raise CloudifyCliError(
                'Failed to execute: {0} ({1})'.format(
                    output.real_command, output.stderr))
        return output
def scp(local_path, path_on_manager, to_manager):
    """Copy a file to or from the manager via scp, failing on error.

    :param to_manager: True copies local -> manager; False the reverse.
    :raises RuntimeError: if scp exits with a non-zero code.
    """
    from cloudify_cli.utils import get_management_key
    from cloudify_cli.utils import get_management_server_ip
    from cloudify_cli.utils import get_management_user
    remote = '{0}@{1}:{2}'.format(get_management_user(),
                                  get_management_server_ip(),
                                  path_on_manager)
    command = [spawn.find_executable('scp'),
               '-o', 'StrictHostKeyChecking=no',
               '-i', os.path.expanduser(get_management_key())]
    command += [local_path, remote] if to_manager else [remote, local_path]
    exit_code = call(command)
    if exit_code:
        raise RuntimeError('Scp failed with exit code: {0}'.format(exit_code))
def ssh(ssh_plain_mode, ssh_command):
    """Connect to the manager over SSH, optionally running a command.

    :param ssh_plain_mode: when True, do not pass ``-i <key>``.
    :param ssh_command: optional remote command to run.
    """
    logger = get_logger()
    ssh_path = spawn.find_executable('ssh')
    logger.debug('SSH executable path: {0}'.format(ssh_path or 'Not found'))
    if not ssh_path and platform.system() == 'Windows':
        msg = messages.SSH_WIN_NOT_FOUND
        raise CloudifyCliError(msg)
    elif not ssh_path:
        msg = messages.SSH_LINUX_NOT_FOUND
        raise CloudifyCliError(msg)
    else:
        argv = [ssh_path,
                '{0}@{1}'.format(get_management_user(),
                                 get_management_server_ip())]
        if get_global_verbosity():
            argv.append('-v')
        if not ssh_plain_mode:
            argv.extend(['-i', os.path.expanduser(get_management_key())])
        if ssh_command:
            argv.extend(['--', ssh_command])
        logger.debug('executing command: {0}'.format(' '.join(argv)))
        logger.info('Trying to connect...')
        from subprocess import call
        call(argv)
import os
import subprocess
import sys

from cloudify_cli.utils import (get_management_user,
                                get_management_server_ip,
                                get_management_key)


def main():
    """Probe non-interactive SSH connectivity to the manager.

    Exits with ssh's return code: 0 when the manager is reachable with
    the configured key, non-zero otherwise. Uses an argv list (shell=False)
    so paths/usernames cannot be shell-interpreted, and expands '~' in the
    key path for consistency with the rest of the CLI.
    """
    command = [
        'ssh', '-n', '-o', 'BatchMode=yes',
        '-i', os.path.expanduser(get_management_key()),
        '{0}@{1}'.format(get_management_user(), get_management_server_ip()),
        'true',
    ]
    # Discard ssh's stderr, matching the original '2> /dev/null'.
    with open(os.devnull, 'wb') as devnull:
        rc = subprocess.call(command, stderr=devnull)
    sys.exit(rc)


if __name__ == '__main__':
    main()
import os
import sys

from cloudify_cli.utils import (get_management_user,
                                get_management_server_ip,
                                get_management_key)

# Probe non-interactive SSH connectivity to the manager; exit with ssh's
# status code (0 means reachable). stderr is discarded.
ssh_probe = 'ssh -n -o BatchMode=yes -i %s %s@%s true 2> /dev/null' % (
    get_management_key(),
    get_management_user(),
    get_management_server_ip(),
)
probe_status = os.system(ssh_probe)
sys.exit(os.WEXITSTATUS(probe_status))