def create_disk_from_distro(vm_=None, LinodeID=None, swapsize=None):
    '''
    Create the root disk for a linode from its distribution image.

    Requires a root password in the cloud configuration; an SSH public key
    is optional and is passed along when present.
    '''
    conn = get_conn()
    ssh_key = get_pubkey(vm_)
    root_password = get_password(vm_)

    if not root_password:
        # Linode refuses disk creation without a root password.
        raise SaltCloudConfigError('The Linode driver requires a password.')

    extra = {'rootPass': root_password}
    if ssh_key:
        extra['rootSSHKey'] = ssh_key

    return conn.linode_disk_createfromdistribution(
        LinodeID=LinodeID,
        DistributionID=get_image(conn, vm_),
        Label='root',
        Size=get_disk_size(vm_, get_size(conn, vm_)['disk'], get_swap(vm_)),
        **extra)
def wait_for_passwd(host, port=22, ssh_timeout=15, username='******',
                    password=None, key_filename=None, maxtries=15,
                    trysleep=1, display_ssh_output=True):
    '''
    Wait until an SSH connection to ``host`` can be authenticated, using
    either the supplied SSH key or password.

    Returns ``True`` once a remote command succeeds, ``False`` after
    ``maxtries`` failed attempts.  Raises ``SaltCloudConfigError`` when the
    configured ``key_filename`` does not exist.
    '''
    # Validate the key file once, *outside* the retry loop.  Previously this
    # check lived inside the loop, where the broad ``except Exception``
    # swallowed the SaltCloudConfigError before ``trycount`` was ever
    # incremented -- looping forever on a missing key file.
    if key_filename and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename {0!r} does not exist'.format(
                key_filename))

    # The connection arguments do not change between attempts; build once.
    kwargs = {
        'hostname': host,
        'port': port,
        'username': username,
        'timeout': ssh_timeout,
        'display_ssh_output': display_ssh_output
    }
    if key_filename:
        kwargs['key_filename'] = key_filename
        log.debug('Using {0} as the key_filename'.format(key_filename))
    elif password:
        kwargs['password'] = password
        # Never write the actual password to the log.
        log.debug('Using password authentication')

    trycount = 0
    while trycount < maxtries:
        trycount += 1
        try:
            log.debug(
                'Attempting to authenticate as {0} (try {1} of {2})'.format(
                    username, trycount, maxtries))
            # A trivial remote command proves authentication works.
            status = root_cmd('date', tty=False, sudo=False, **kwargs)
            if status != 0:
                if trycount < maxtries:
                    time.sleep(trysleep)
                    continue
                log.error(
                    'Authentication failed: status code {0}'.format(status))
                return False
            return True
        except Exception:
            # Transient SSH errors: back off and retry until out of tries.
            if trycount >= maxtries:
                return False
            time.sleep(trysleep)
    return False
def destroy(name, call=None):
    '''
    Destroy a node.  Will check termination protection and warn if enabled.

    CLI Example:

    .. code-block:: bash

        salt-cloud --destroy mymachine
    '''
    # Destroying is only valid as an action (-d/--destroy/-a), never as
    # a --function call.
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.')

    salt.utils.cloud.fire_event(
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        {'name': name},
        transport=__opts__['transport'])

    instance = show_instance(name, call='action')
    node = query(method='droplets',
                 droplet_id=instance['id'],
                 http_method='delete')

    delete_record = config.get_cloud_config_value(
        'delete_dns_record',
        get_configured_provider(),
        __opts__,
        search_global=False,
        default=None,
    )

    # Reject non-boolean configuration values; only an explicit True
    # triggers DNS cleanup.
    if delete_record is not None and not isinstance(delete_record, bool):
        raise SaltCloudConfigError(
            "'delete_dns_record' should be a boolean value.")
    if delete_record:
        delete_dns_record(name)

    salt.utils.cloud.fire_event(
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        {'name': name},
        transport=__opts__['transport'])

    # Keep the minion cache consistent with the cloud state.
    if __opts__.get('update_cachedir', False) is True:
        salt.utils.cloud.delete_minion_cachedir(
            name, __active_provider_name__.split(':')[0], __opts__)

    return node
def get_auth(vm_):
    '''
    Return either NodeAuthSSHKey or NodeAuthPassword, preferring
    NodeAuthSSHKey if both are provided.

    Raises ``SaltCloudConfigError`` when neither credential is configured.
    '''
    # Look each credential up only once; the getters walk the cloud
    # configuration, so the previous double calls did redundant work.
    pubkey = get_pubkey(vm_)
    if pubkey is not None:
        return NodeAuthSSHKey(pubkey)

    password = get_password(vm_)
    if password is not None:
        return NodeAuthPassword(password)

    raise SaltCloudConfigError(
        'The Linode driver requires either a password or ssh_pubkey with '
        'corresponding ssh_private_key.')
def minion_config(opts, vm_):
    '''
    Return a minion's configuration for the provided options and VM.
    '''
    # Let's get a copy of the salt minion default options
    minion = salt.config.DEFAULT_MINION_OPTS.copy()

    # Some default options are Null, let's set a reasonable default
    minion.update(
        log_level='info',
        log_level_logfile='info'
    )

    # Now, let's update it to our needs
    minion['id'] = vm_['name']
    master_finger = salt.config.get_cloud_config_value(
        'master_finger', vm_, opts)
    if master_finger is not None:
        minion['master_finger'] = master_finger
    minion.update(
        # Get ANY defined minion settings, merging data, in the following
        # order:
        # 1. VM config
        # 2. Profile config
        # 3. Global configuration
        salt.config.get_cloud_config_value(
            'minion', vm_, opts, default={}, search_global=True)
    )

    make_master = salt.config.get_cloud_config_value('make_master', vm_, opts)
    if 'master' not in minion and make_master is not True:
        raise SaltCloudConfigError(
            'A master setting was not defined in the minion\'s '
            'configuration.')

    # Get ANY defined grains settings, merging data, in the following order:
    # 1. VM config
    # 2. Profile config
    # 3. Global configuration
    #
    # NOTE: ``minion`` is a *shallow* copy of DEFAULT_MINION_OPTS, so
    # updating the inherited grains dict in place would mutate the shared
    # defaults for every subsequent caller.  Copy it first.
    grains = dict(minion.get('grains') or {})
    grains.update(
        salt.config.get_cloud_config_value(
            'grains', vm_, opts, default={}, search_global=True)
    )
    minion['grains'] = grains
    return minion
def create(vm_):
    '''
    Create a single VM from a data dict (OpenNebula).

    Fires lifecycle events, instantiates the template over XML-RPC, waits
    for the node to reach a running state, and optionally runs the Salt
    deploy script.  Returns a result dict on success, ``False`` on an
    instantiation error, or raises ``SaltCloudSystemExit`` on timeout.
    '''
    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    kwargs = {
        'name': vm_['name'],
        'image_id': get_image(vm_),
        'region_id': get_location(vm_),
    }

    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename {0!r} does not exist'.format(
                key_filename
            )
        )

    private_networking = config.get_cloud_config_value(
        'private_networking', vm_, __opts__, search_global=False, default=None
    )
    # The API expects the literal strings 'true'/'false', not booleans.
    kwargs['private_networking'] = 'true' if private_networking else 'false'

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {'kwargs': kwargs},
    )

    region = ''
    if kwargs['region_id'] is not None:
        # Pin scheduling to the requested host/cluster ID.
        region = 'SCHED_REQUIREMENTS="ID={0}"'.format(kwargs['region_id'])
    try:
        server, user, password = _get_xml_rpc()
        ret = server.one.template.instantiate(
            user + ':' + password,
            int(kwargs['image_id']),
            kwargs['name'],
            False,
            region)[1]
    except Exception as exc:
        log.error(
            'Error creating {0} on OpenNebula\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: {1}'.format(
                vm_['name'],
                exc.message  # NOTE: ``exc.message`` is Python 2 only.
            ),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG)
        )
        return False

    def __query_node_data(vm_name):
        # Poll helper for wait_for_ip: False aborts, a dict completes,
        # None retries.
        data = show_instance(vm_name, call='action')
        if not data:
            # Trigger an error in the wait_for_ip function
            return False
        if data['state'] == '7':
            # Terminal (DONE) state -- give up.
            return False
        if data['lcm_state'] == '3':
            # LCM RUNNING -- the node is up.
            return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_['name'],),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=2),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(exc.message)

    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root'
    )

    # BUGFIX: ``ret`` was previously created only inside the deploy branch,
    # so ``ret.update(data)`` below raised NameError whenever the ``deploy``
    # option was not True.  Initialize it unconditionally.
    ret = {}

    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        deploy_kwargs = {
            'host': data['private_ips'][0],
            'username': ssh_username,
            'key_filename': key_filename,
            'script': deploy_script,
            'name': vm_['name'],
            'tmp_dir': config.get_cloud_config_value(
                'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'
            ),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True
            ),
            # Non-root users need sudo by default.
            'sudo': config.get_cloud_config_value(
                'sudo', vm_, __opts__, default=(ssh_username != 'root')
            ),
            'sudo_password': config.get_cloud_config_value(
                'sudo_password', vm_, __opts__, default=None
            ),
            'tty': config.get_cloud_config_value(
                'tty', vm_, __opts__, default=False
            ),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__
            ),
            'script_env': config.get_cloud_config_value(
                'script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True
        )

        # Check for Windows install params
        win_installer = config.get_cloud_config_value(
            'win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator'
            )
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default=''
            )

        # Store what was used to the deploy the VM, minus any secrets.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event(
            'event',
            'executing deploy script',
            'salt/cloud/{0}/deploying'.format(vm_['name']),
            {'kwargs': event_kwargs},
        )

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)
        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error(
                'Failed to start Salt on Cloud VM {0}'.format(
                    vm_['name']
                )
            )

    ret.update(data)

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug(
        '{0[name]!r} VM creation details:\n{1}'.format(
            vm_, pprint.pformat(data)
        )
    )

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    return ret
def create(vm_):
    '''
    Create a single VM from a data dict (Nova/OpenStack).

    Validates SSH credentials, boots the instance, waits for it to become
    ACTIVE with a usable IP, then optionally runs the Salt deploy script.
    Returns a result dict on success or ``False`` on a provisioning error.
    '''
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file {0!r} does not exist'.format(
                key_filename))

    # Password-based deploys require the sshpass binary on this machine.
    if deploy is True and key_filename is None and \
            salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'ssh_key_file\' setting '
            'is not set and \'sshpass\' binary is not present on the '
            'system for the password.')

    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport'])

    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-')
    conn = get_conn()
    kwargs = {'name': vm_['name']}

    try:
        kwargs['image_id'] = get_image(conn, vm_)
    except Exception as exc:
        log.error(
            'Error creating {0} on OPENSTACK\n\n'
            'Could not find image {1}: {2}\n'.format(
                vm_['name'], vm_['image'], exc),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    try:
        kwargs['flavor_id'] = get_size(conn, vm_)
    except Exception as exc:
        log.error(
            'Error creating {0} on OPENSTACK\n\n'
            'Could not find size {1}: {2}\n'.format(
                vm_['name'], vm_['size'], exc),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    kwargs['key_name'] = config.get_cloud_config_value(
        'ssh_key_name', vm_, __opts__, search_global=False)

    security_groups = config.get_cloud_config_value(
        'security_groups', vm_, __opts__, search_global=False)
    if security_groups is not None:
        # Validate every requested group exists before booting.
        vm_groups = security_groups.split(',')
        avail_groups = conn.ex_list_security_groups()
        group_list = []
        for vmg in vm_groups:
            if vmg in [ag.name for ag in avail_groups]:
                group_list.append(vmg)
            else:
                raise SaltCloudNotFound(
                    'No such security group: \'{0}\''.format(vmg))
        kwargs['ex_security_groups'] = [
            g for g in avail_groups if g.name in group_list
        ]

    # NOTE(review): ``networks`` is read but never used in this visible
    # code path -- confirm whether floating-IP/network handling was removed.
    networks = config.get_cloud_config_value(
        'networks', vm_, __opts__, search_global=False)
    floating = []

    files = config.get_cloud_config_value(
        'files', vm_, __opts__, search_global=False)
    if files:
        kwargs['files'] = {}
        for src_path in files:
            with salt.utils.fopen(files[src_path], 'r') as fp_:
                kwargs['files'][src_path] = fp_.read()

    userdata_file = config.get_cloud_config_value(
        'userdata_file', vm_, __opts__, search_global=False)
    if userdata_file is not None:
        with salt.utils.fopen(userdata_file, 'r') as fp:
            kwargs['ex_userdata'] = fp.read()

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {
            'kwargs': {
                'name': kwargs['name'],
                'image': kwargs['image_id'],
                'size': kwargs['flavor_id']
            }
        },
        transport=__opts__['transport'])

    try:
        data = conn.boot(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on Nova\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(vm_['name'], exc),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    def __query_node_data(vm_, data, floating):
        # Poll helper for wait_for_ip: False aborts, a node object
        # completes, None/implicit return retries.
        try:
            nodelist = list_nodes_full()
            log.debug('Loaded node data for {0}:\n{1}'.format(
                vm_['name'],
                pprint.pformat(nodelist[vm_['name']].__dict__)))
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(err),
                # Show the traceback if the debug logging level is enabled
                exc_info=log.isEnabledFor(logging.DEBUG))
            # Trigger a failure in the wait for IP function
            return False

        running = nodelist[vm_['name']].state == 'ACTIVE'
        if not running:
            # Still not running, trigger another iteration
            return

        if rackconnect(vm_) is True:
            extra = nodelist[vm_['name']].get('extra', {})
            rc_status = extra.get('metadata', {}).get(
                'rackconnect_automation_status', '')
            # ``access_ip`` is reused further down; it is only defined when
            # this rackconnect branch runs, which the later use also guards.
            access_ip = extra.get('access_ip', '')
            if rc_status != 'DEPLOYED':
                log.debug('Waiting for Rackconnect automation to complete')
                return

        if managedcloud(vm_) is True:
            extra = conn.server_show_libcloud(nodelist[vm_['name']].id).extra
            mc_status = extra.get('metadata', {}).get(
                'rax_service_level_automation', '')
            if mc_status != 'Complete':
                log.debug('Waiting for managed cloud automation to complete')
                return

        if floating:
            try:
                name = data.name
                ip = floating[0].ip_address
                conn.ex_attach_floating_ip_to_node(data, ip)
                log.info(('Attaching floating IP "{0}"'
                          ' to node "{1}"').format(ip, name))
            except Exception as e:
                # Note(pabelanger): Because we loop, we only want to attach
                # the floating IP address one. So, expect failures if the IP
                # is already attached.
                pass

        result = []
        private = nodelist[vm_['name']].private_ips
        public = nodelist[vm_['name']].public_ips
        if private and not public:
            # Some providers report public addresses as private ones.
            log.warn('Private IPs returned, but not public... Checking for '
                     'misidentified IPs')
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.utils.cloud.is_public_ip(private_ip):
                    log.warn('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                    log.warn(('Public IP address was not ready when we last'
                              ' checked. Appending public IP address now.'))
                    public = data.public_ips
                else:
                    log.warn('{0} is a private IP'.format(private_ip))
                    ignore_ip = ignore_cidr(vm_, private_ip)
                    if private_ip not in data.private_ips and not ignore_ip:
                        result.append(private_ip)

        if rackconnect(vm_) is True:
            if ssh_interface(vm_) != 'private_ips':
                # NOTE(review): assigns a single IP string where a list is
                # normally stored -- confirm downstream consumers accept it.
                data.public_ips = access_ip
                return data

        if result:
            log.debug('result = {0}'.format(result))
            data.private_ips = result
            if ssh_interface(vm_) == 'private_ips':
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != 'private_ips':
                return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data, floating),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(exc.message)

    log.debug('VM is now running')

    # Pick the address the deploy script should connect to.
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
        ip_address = data.public_ips
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root')

    deploy_kwargs = {
        'opts': __opts__,
        'host': ip_address,
        'name': vm_['name'],
        'sock_dir': __opts__['sock_dir'],
        'tmp_dir': config.get_cloud_config_value(
            'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
        'deploy_command': config.get_cloud_config_value(
            'deploy_command', vm_, __opts__,
            default='/tmp/.saltcloud/deploy.sh',
        ),
        'start_action': __opts__['start_action'],
        'parallel': __opts__['parallel'],
        'minion_pem': vm_['priv_key'],
        'minion_pub': vm_['pub_key'],
        'keep_tmp': __opts__['keep_tmp'],
        'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
        # Non-root users need sudo by default.
        'sudo': config.get_cloud_config_value(
            'sudo', vm_, __opts__, default=(ssh_username != 'root')),
        'sudo_password': config.get_cloud_config_value(
            'sudo_password', vm_, __opts__, default=None),
        'tty': config.get_cloud_config_value(
            'tty', vm_, __opts__, default=False),
        'display_ssh_output': config.get_cloud_config_value(
            'display_ssh_output', vm_, __opts__, default=True),
        'script_args': config.get_cloud_config_value(
            'script_args', vm_, __opts__),
        'script_env': config.get_cloud_config_value(
            'script_env', vm_, __opts__),
        'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
    }

    if ssh_username != 'root':
        deploy_kwargs['username'] = ssh_username
        deploy_kwargs['tty'] = True
    log.debug('Using {0} as SSH username'.format(ssh_username))

    if key_filename is not None:
        deploy_kwargs['key_filename'] = key_filename
        log.debug('Using {0} as SSH key file'.format(key_filename))
    elif 'password' in data.extra:
        deploy_kwargs['password'] = data.extra['password']
        log.debug('Logging into SSH using password')

    ret = {}
    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        deploy_kwargs['script'] = deploy_script.script

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Check for Windows install params
        win_installer = config.get_cloud_config_value(
            'win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator')
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default='')

        # Store what was used to the deploy the VM, minus any secrets.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event(
            'event',
            'executing deploy script',
            'salt/cloud/{0}/deploying'.format(vm_['name']),
            {'kwargs': event_kwargs},
            transport=__opts__['transport'])

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)
        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error(
                'Failed to deploy and start Salt on Cloud VM {0}'.format(
                    vm_['name']))

    ret.update(data.__dict__)

    # Don't let the plaintext password leak into returns/logs below.
    if 'password' in data.extra:
        del data.extra['password']

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport'])

    return ret
def deploy_script(host, port=22, timeout=900, username='******',
                  password=None, key_filename=None, script=None,
                  name=None, pub_key=None, sock_dir=None, provider=None,
                  conf_file=None, start_action=None, make_master=False,
                  master_pub=None, master_pem=None, master_conf=None,
                  minion_pub=None, minion_pem=None, minion_conf=None,
                  keep_tmp=False, script_args=None, script_env=None,
                  ssh_timeout=15, make_syndic=False, make_minion=True,
                  display_ssh_output=True, preseed_minion_keys=None,
                  parallel=False, sudo_password=None, sudo=False, tty=None,
                  deploy_command='/tmp/.saltcloud/deploy.sh',
                  tmp_dir='/tmp/.saltcloud', **kwargs):
    '''
    Copy a deploy script to a remote server, execute it, and remove it.

    Waits for the SSH port and for authentication to succeed, uploads the
    minion/master keys and configuration plus the deploy script, runs the
    script (optionally wrapped to export ``script_env``), cleans up unless
    ``keep_tmp``, and fires a deploy event.  Returns ``True`` on success,
    ``False`` if SSH never became reachable.
    '''
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename {0!r} does not exist'.format(
                key_filename))

    # NOTE(review): ``gateway`` is extracted but never used in this visible
    # code -- confirm whether SSH-gateway support was removed or is pending.
    gateway = None
    if 'gateway' in kwargs:
        gateway = kwargs['gateway']

    starttime = time.mktime(time.localtime())
    log.debug('Deploying {0} at {1}'.format(host, starttime))
    if wait_for_port(host=host, port=port):
        log.debug('SSH port {0} on {1} is available'.format(port, host))
        newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
        if wait_for_passwd(host, port=port, username=username,
                           password=password, key_filename=key_filename,
                           ssh_timeout=ssh_timeout,
                           display_ssh_output=display_ssh_output):
            log.debug('Logging into {0}:{1} as {2}'.format(
                host, port, username))
            newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
            # From here on, ``kwargs`` is repurposed as the SSH connection
            # arguments passed to root_cmd/scp_file.
            kwargs = {
                'hostname': host,
                'port': port,
                'username': username,
                'timeout': ssh_timeout,
                'display_ssh_output': display_ssh_output,
                'sudo_password': sudo_password,
            }
            if key_filename:
                log.debug('Using {0} as the key_filename'.format(
                    key_filename))
                kwargs['key_filename'] = key_filename
            elif password:
                log.debug('Using {0} as the password'.format(password))
                kwargs['password'] = password
            #FIXME: this try-except doesn't make sense! Something is missing...
            try:
                log.debug('SSH connection to {0} successful'.format(host))
            except Exception as exc:
                log.error(
                    'There was an error in deploy_script: {0}'.format(exc))

            # IBM SmartCloud images ship sshd with sftp disabled; enable it
            # so the scp_file uploads below can work.
            if provider == 'ibmsce':
                subsys_command = ('sed -i "s/#Subsystem/Subsystem/" '
                                  '/etc/ssh/sshd_config')
                root_cmd(subsys_command, tty, sudo, **kwargs)
                root_cmd('service sshd restart', tty, sudo, **kwargs)

            root_cmd(
                '[ ! -d {0} ] && (mkdir -p {0}; chmod 700 {0}) || '
                'echo "Directory {0} already exists..."'.format(tmp_dir),
                tty, sudo, **kwargs)
            if sudo:
                comps = tmp_dir.lstrip('/').rstrip('/').split('/')
                if len(comps) > 0:
                    # Anything that is not exactly /tmp must be chowned so
                    # the non-root user can write into it.
                    if len(comps) > 1 or comps[0] != 'tmp':
                        root_cmd(
                            'chown {0}. {1}'.format(username, tmp_dir),
                            tty, sudo, **kwargs)

            # Minion configuration
            if minion_pem:
                scp_file('{0}/minion.pem'.format(tmp_dir), minion_pem, kwargs)
                root_cmd('chmod 600 {0}/minion.pem'.format(tmp_dir),
                         tty, sudo, **kwargs)
            if minion_pub:
                scp_file('{0}/minion.pub'.format(tmp_dir), minion_pub, kwargs)
            if minion_conf:
                if not isinstance(minion_conf, dict):
                    # Let's not just fail regarding this change, specially
                    # since we can handle it
                    raise DeprecationWarning(
                        '`salt.utils.cloud.deploy_script now only accepts '
                        'dictionaries for it\'s `minion_conf` parameter. '
                        'Loading YAML...')
                minion_grains = minion_conf.pop('grains', {})
                if minion_grains:
                    scp_file('{0}/grains'.format(tmp_dir),
                             salt_config_to_yaml(minion_grains), kwargs)
                scp_file('{0}/minion'.format(tmp_dir),
                         salt_config_to_yaml(minion_conf), kwargs)

            # Master configuration
            if master_pem:
                scp_file('{0}/master.pem'.format(tmp_dir), master_pem, kwargs)
                root_cmd('chmod 600 {0}/master.pem'.format(tmp_dir),
                         tty, sudo, **kwargs)
            if master_pub:
                scp_file('{0}/master.pub'.format(tmp_dir), master_pub, kwargs)
            if master_conf:
                if not isinstance(master_conf, dict):
                    # Let's not just fail regarding this change, specially
                    # since we can handle it
                    raise DeprecationWarning(
                        '`salt.utils.cloud.deploy_script now only accepts '
                        'dictionaries for it\'s `master_conf` parameter. '
                        'Loading from YAML ...')
                scp_file('{0}/master'.format(tmp_dir),
                         salt_config_to_yaml(master_conf), kwargs)

            # XXX: We need to make these paths configurable
            preseed_minion_keys_tempdir = '{0}/preseed-minion-keys'.format(
                tmp_dir)
            if preseed_minion_keys is not None:
                # Create remote temp dir
                root_cmd('mkdir "{0}"'.format(preseed_minion_keys_tempdir),
                         tty, sudo, **kwargs)
                root_cmd('chmod 700 "{0}"'.format(preseed_minion_keys_tempdir),
                         tty, sudo, **kwargs)
                if kwargs['username'] != 'root':
                    root_cmd(
                        'chown {0} "{1}"'.format(
                            kwargs['username'], preseed_minion_keys_tempdir),
                        tty, sudo, **kwargs)
                # Copy pre-seed minion keys
                for minion_id, minion_key in preseed_minion_keys.iteritems():
                    rpath = os.path.join(preseed_minion_keys_tempdir,
                                         minion_id)
                    scp_file(rpath, minion_key, kwargs)
                if kwargs['username'] != 'root':
                    root_cmd(
                        'chown -R root "{0}"'.format(
                            preseed_minion_keys_tempdir),
                        tty, sudo, **kwargs)

            # The actual deploy script
            if script:
                scp_file('{0}/deploy.sh'.format(tmp_dir), script, kwargs)
                root_cmd('chmod +x {0}/deploy.sh'.format(tmp_dir),
                         tty, sudo, **kwargs)

            newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
            queue = None
            process = None
            # Consider this code experimental. It causes Salt Cloud to wait
            # for the minion to check in, and then fire a startup event.
            # Disabled if parallel because it doesn't work!
            if start_action and not parallel:
                queue = multiprocessing.Queue()
                process = multiprocessing.Process(
                    target=check_auth, kwargs=dict(
                        name=name, pub_key=pub_key, sock_dir=sock_dir,
                        timeout=newtimeout, queue=queue))
                log.debug('Starting new process to wait for salt-minion')
                process.start()

            # Run the deploy script
            if script:
                if 'bootstrap-salt' in script:
                    # Translate our options into salt-bootstrap CLI flags.
                    deploy_command += ' -c {0}'.format(tmp_dir)
                    if make_syndic is True:
                        deploy_command += ' -S'
                    if make_master is True:
                        deploy_command += ' -M'
                    if make_minion is False:
                        deploy_command += ' -N'
                    if keep_tmp is True:
                        deploy_command += ' -K'
                    if preseed_minion_keys is not None:
                        deploy_command += ' -k {0}'.format(
                            preseed_minion_keys_tempdir)
                if script_args:
                    deploy_command += ' {0}'.format(script_args)

                if script_env:
                    if not isinstance(script_env, dict):
                        raise SaltCloudSystemExit(
                            'The \'script_env\' configuration setting NEEDS '
                            'to be a dictionary not a {0}'.format(
                                type(script_env)))
                    # Build a wrapper script that exports the environment
                    # (csh ``setenv`` with a sh ``export`` fallback) and then
                    # execs the real deploy command.
                    environ_script_contents = ['#!/bin/sh']
                    for key, value in script_env.iteritems():
                        environ_script_contents.append(
                            'setenv {0} \'{1}\' >/dev/null 2>&1 || '
                            'export {0}=\'{1}\''.format(key, value))
                    environ_script_contents.append(deploy_command)

                    # Upload our environ setter wrapper
                    scp_file(
                        '{0}/environ-deploy-wrapper.sh'.format(tmp_dir),
                        '\n'.join(environ_script_contents), kwargs)
                    root_cmd(
                        'chmod +x {0}/environ-deploy-wrapper.sh'.format(
                            tmp_dir),
                        tty, sudo, **kwargs)
                    # The deploy command is now our wrapper
                    deploy_command = '{0}/environ-deploy-wrapper.sh'.format(
                        tmp_dir,
                    )

                if root_cmd(deploy_command, tty, sudo, **kwargs) != 0:
                    raise SaltCloudSystemExit(
                        'Executing the command {0!r} failed'.format(
                            deploy_command))
                log.debug('Executed command {0!r}'.format(deploy_command))

                # Remove the deploy script
                if not keep_tmp:
                    root_cmd('rm -f {0}/deploy.sh'.format(tmp_dir),
                             tty, sudo, **kwargs)
                    log.debug('Removed {0}/deploy.sh'.format(tmp_dir))
                    if script_env:
                        root_cmd(
                            'rm -f {0}/environ-deploy-wrapper.sh'.format(
                                tmp_dir),
                            tty, sudo, **kwargs)
                        log.debug(
                            'Removed {0}/environ-deploy-wrapper.sh'.format(
                                tmp_dir))

            if keep_tmp:
                log.debug(
                    'Not removing deployment files from {0}/'.format(tmp_dir))
            else:
                # Remove minion configuration
                if minion_pub:
                    root_cmd('rm -f {0}/minion.pub'.format(tmp_dir),
                             tty, sudo, **kwargs)
                    log.debug('Removed {0}/minion.pub'.format(tmp_dir))
                if minion_pem:
                    root_cmd('rm -f {0}/minion.pem'.format(tmp_dir),
                             tty, sudo, **kwargs)
                    log.debug('Removed {0}/minion.pem'.format(tmp_dir))
                if minion_conf:
                    root_cmd('rm -f {0}/grains'.format(tmp_dir),
                             tty, sudo, **kwargs)
                    log.debug('Removed {0}/grains'.format(tmp_dir))
                    root_cmd('rm -f {0}/minion'.format(tmp_dir),
                             tty, sudo, **kwargs)
                    log.debug('Removed {0}/minion'.format(tmp_dir))

                # Remove master configuration
                if master_pub:
                    root_cmd('rm -f {0}/master.pub'.format(tmp_dir),
                             tty, sudo, **kwargs)
                    log.debug('Removed {0}/master.pub'.format(tmp_dir))
                if master_pem:
                    root_cmd('rm -f {0}/master.pem'.format(tmp_dir),
                             tty, sudo, **kwargs)
                    log.debug('Removed {0}/master.pem'.format(tmp_dir))
                if master_conf:
                    root_cmd('rm -f {0}/master'.format(tmp_dir),
                             tty, sudo, **kwargs)
                    log.debug('Removed {0}/master'.format(tmp_dir))

                # Remove pre-seed keys directory
                if preseed_minion_keys is not None:
                    root_cmd(
                        'rm -rf {0}'.format(preseed_minion_keys_tempdir),
                        tty, sudo, **kwargs)
                    log.debug(
                        'Removed {0}'.format(preseed_minion_keys_tempdir))

            if start_action and not parallel:
                # Block until the check_auth subprocess reports the minion
                # key was accepted (or times out).
                queuereturn = queue.get()
                process.join()
                if queuereturn and start_action:
                    #client = salt.client.LocalClient(conf_file)
                    #output = client.cmd_iter(
                    #    host, 'state.highstate', timeout=timeout
                    #)
                    #for line in output:
                    #    print(line)
                    log.info('Executing {0} on the salt-minion'.format(
                        start_action))
                    root_cmd('salt-call {0}'.format(start_action),
                             tty, sudo, **kwargs)
                    log.info(
                        'Finished executing {0} on the salt-minion'.format(
                            start_action))
            # Fire deploy action
            fire_event(
                'event',
                '{0} has been deployed at {1}'.format(name, host),
                'salt/cloud/{0}/deploy_script'.format(name),
            )
            return True
    return False
def create(vm_):
    '''
    Create a single VM from a data dict.

    Drives the full lifecycle for this provider: fires the ``creating``
    event, requests (or adopts) the instance, polls until an IP address is
    usable, then hands off to ``salt.utils.cloud.bootstrap`` to deploy Salt.

    vm_ -- profile/VM data dict (must contain at least ``name``, ``profile``
           and ``provider``; may contain ``instance_id`` for pre-existing
           instances).
    Returns the bootstrap result dict merged with the node's attributes.
    Raises SaltCloudConfigError / SaltCloudSystemExit on config or
    deployment failures.
    '''
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file {0!r} does not exist'.format(
                key_filename))

    # Without a key file, password auth via sshpass is the only way the
    # deploy scripts can log in.
    if deploy is True and key_filename is None and \
            salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'ssh_key_file\' setting '
            'is not set and \'sshpass\' binary is not present on the '
            'system for the password.')

    vm_['key_filename'] = key_filename

    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport'])

    conn = get_conn()

    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for {0[name]!r}'.format(vm_))
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value('keysize', vm_, __opts__))
        data = conn.server_show_libcloud(vm_['instance_id'])
        # Reset the root password if requested so we can log in without the
        # original credentials.
        if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__[
                'change_password'] is True:
            vm_['password'] = sup.secure_password()
            conn.root_password(vm_['instance_id'], vm_['password'])
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        data, vm_ = request_instance(vm_)

        # Pull the instance ID, valid for both spot and normal instances
        vm_['instance_id'] = data.id

    def __query_node_data(vm_, data):
        # Poll callback for salt.utils.cloud.wait_for_ip. Return values:
        #   False      -> hard failure, abort the wait loop
        #   None       -> not ready yet, poll again
        #   data       -> node is reachable, stop polling
        try:
            node = show_instance(vm_['name'], 'action')
            log.debug('Loaded node data for {0}:\n{1}'.format(
                vm_['name'], pprint.pformat(node)))
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(err),
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG)
            # Trigger a failure in the wait for IP function
            return False

        running = node['state'] == 'ACTIVE'
        if not running:
            # Still not running, trigger another iteration
            return

        if rackconnect(vm_) is True:
            extra = node.get('extra', {})
            rc_status = extra.get('metadata', {}).get(
                'rackconnect_automation_status', '')
            # NOTE(review): access_ip is only bound in this branch but is
            # read again further down when rackconnect(vm_) is True.
            access_ip = extra.get('access_ip', '')
            if rc_status != 'DEPLOYED':
                log.debug('Waiting for Rackconnect automation to complete')
                return

        if managedcloud(vm_) is True:
            extra = conn.server_show_libcloud(node['id']).extra
            mc_status = extra.get('metadata', {}).get(
                'rax_service_level_automation', '')
            if mc_status != 'Complete':
                log.debug('Waiting for managed cloud automation to complete')
                return

        result = []
        # Fall back to the access IP when the node reports no address lists.
        if 'private_ips' not in node and 'public_ips' not in node and \
                'access_ip' in node.get('extra', {}):
            result = [node['extra']['access_ip']]

        private = node.get('private_ips', [])
        public = node.get('public_ips', [])
        if private and not public:
            log.warn('Private IPs returned, but not public... Checking for '
                     'misidentified IPs')
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.utils.cloud.is_public_ip(private_ip):
                    log.warn('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                    log.warn(('Public IP address was not ready when we last'
                              ' checked. Appending public IP address now.'))
                    public = data.public_ips
                else:
                    log.warn('{0} is a private IP'.format(private_ip))
                    ignore_ip = ignore_cidr(vm_, private_ip)
                    if private_ip not in data.private_ips and not ignore_ip:
                        result.append(private_ip)

        if rackconnect(vm_) is True:
            if ssh_interface(vm_) != 'private_ips':
                # NOTE(review): assigns the access IP (a single value, not a
                # list) to public_ips — downstream callers appear to handle
                # this; confirm before changing.
                data.public_ips = access_ip
                return data

        if result:
            log.debug('result = {0}'.format(result))
            data.private_ips = result
            if ssh_interface(vm_) == 'private_ips':
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != 'private_ips':
                return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(exc.message)

    log.debug('VM is now running')

    # Pick the address used for the deploy SSH connection.
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
        ip_address = data.public_ips
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    # The salt master may reach the minion over a different interface than
    # the one used for the initial SSH deploy.
    if get_salt_interface(vm_) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    elif rackconnect(vm_) is True and get_salt_interface(vm_) != 'private_ips':
        salt_ip_address = data.public_ips
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: {0}'.format(salt_ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    vm_['ssh_host'] = ip_address
    vm_['salt_host'] = salt_ip_address

    ret = salt.utils.cloud.bootstrap(vm_, __opts__)
    ret.update(data.__dict__)

    # Never leak the root password into returned data / logs.
    if 'password' in ret['extra']:
        del ret['extra']['password']

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport'])

    return ret
def request_instance(vm_=None, call=None):
    '''
    Put together all of the information necessary to request an instance
    on Openstack and then fire off the request the instance.

    Returns data about the instance

    vm_  -- profile/VM data dict (``name``, ``image``, ``size``, ...).
    call -- salt-cloud invocation mode; calling with ``--function`` is
            rejected.
    Returns a ``(node, vm_)`` tuple; ``vm_`` gains ``floating`` (list of
    unattached floating IPs) and ``password`` entries.
    Raises SaltCloudSystemExit / SaltCloudNotFound / SaltCloudConfigError
    on lookup or configuration problems.
    '''
    if call == 'function':
        # Technically this function may be called other ways too, but it
        # definitely cannot be called with --function.
        raise SaltCloudSystemExit(
            'The request_instance action must be called with -a or --action.')

    salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-')
    conn = get_conn()
    kwargs = {'name': vm_['name']}

    try:
        kwargs['image'] = get_image(conn, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit('Error creating {0} on OPENSTACK\n\n'
                                  'Could not find image {1}: {2}\n'.format(
                                      vm_['name'], vm_['image'], exc))

    try:
        kwargs['size'] = get_size(conn, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit('Error creating {0} on OPENSTACK\n\n'
                                  'Could not find size {1}: {2}\n'.format(
                                      vm_['name'], vm_['size'], exc))

    # Note: This currently requires libcloud trunk
    avz = config.get_cloud_config_value(
        'availability_zone', vm_, __opts__, default=None, search_global=False)
    if avz is not None:
        kwargs['ex_availability_zone'] = avz

    kwargs['ex_keyname'] = config.get_cloud_config_value(
        'ssh_key_name', vm_, __opts__, search_global=False)

    security_groups = config.get_cloud_config_value(
        'security_groups', vm_, __opts__, search_global=False)
    if security_groups is not None:
        vm_groups = security_groups.split(',')
        avail_groups = conn.ex_list_security_groups()
        group_list = []

        # Validate every requested group against what the provider exposes.
        for vmg in vm_groups:
            if vmg in [ag.name for ag in avail_groups]:
                group_list.append(vmg)
            else:
                raise SaltCloudNotFound(
                    'No such security group: \'{0}\''.format(vmg))

        # Pass the libcloud group objects, not the bare names.
        kwargs['ex_security_groups'] = [
            g for g in avail_groups if g.name in group_list
        ]

    networks = config.get_cloud_config_value(
        'networks', vm_, __opts__, search_global=False)

    floating = []

    if HAS014:
        if networks is not None:
            for net in networks:
                if 'fixed' in net:
                    kwargs['networks'] = [
                        OpenStackNetwork(n, None, None, None)
                        for n in net['fixed']
                    ]
                elif 'floating' in net:
                    pool = OpenStack_1_1_FloatingIpPool(
                        net['floating'], conn.connection)
                    # Collect only IPs that are not yet attached to a node.
                    for idx in pool.list_floating_ips():
                        if idx.node_id is None:
                            floating.append(idx)
                    if not floating:
                        # Note(pabelanger): We have no available floating IPs.
                        # For now, we raise an exception and exit.
                        # A future enhancement might be to allow salt-cloud
                        # to dynamically allocate new addresses, but that
                        # might be tricky to manage.
                        raise SaltCloudSystemExit(
                            'Floating pool {0!r} does not have any more '
                            'please create some more or use a different '
                            'pool.'.format(net['floating']))
        # otherwise, attempt to obtain list without specifying pool
        # this is the same as 'nova floating-ip-list'
        elif ssh_interface(vm_) != 'private_ips':
            try:
                # This try/except is here because it appears some
                # *cough* Rackspace *cough*
                # OpenStack providers return a 404 Not Found for the
                # floating ip pool URL if there are no pools setup
                pool = OpenStack_1_1_FloatingIpPool('', conn.connection)
                for idx in pool.list_floating_ips():
                    if idx.node_id is None:
                        floating.append(idx)
                if not floating:
                    # Note(pabelanger): We have no available floating IPs.
                    # For now, we raise an exception and exit.
                    # A future enhancement might be to allow salt-cloud to
                    # dynamically allocate new address but that might be
                    # tricky to manage.
                    raise SaltCloudSystemExit(
                        'There are no more floating IP addresses '
                        'available, please create some more')
            except Exception as e:
                # Swallow only the 404 "no pools" case; re-raise the rest.
                if str(e).startswith('404'):
                    pass
                else:
                    raise
    vm_['floating'] = floating

    files = config.get_cloud_config_value(
        'files', vm_, __opts__, search_global=False)
    if files:
        kwargs['ex_files'] = {}
        for src_path in files:
            with salt.utils.fopen(files[src_path], 'r') as fp_:
                kwargs['ex_files'][src_path] = fp_.read()

    userdata_file = config.get_cloud_config_value(
        'userdata_file', vm_, __opts__, search_global=False)
    if userdata_file is not None:
        with salt.utils.fopen(userdata_file, 'r') as fp:
            kwargs['ex_userdata'] = fp.read()

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {
            'kwargs': {
                'name': kwargs['name'],
                'image': kwargs['image'].name,
                'size': kwargs['size'].name,
                'profile': vm_['profile']
            }
        },
        transport=__opts__['transport'])

    # Tag the instance with its profile by default so it can be recognized
    # later; user-provided 'metadata' overrides this.
    default_profile = {}
    if 'profile' in vm_ and vm_['profile'] is not None:
        default_profile = {'profile': vm_['profile']}

    kwargs['ex_metadata'] = config.get_cloud_config_value(
        'metadata', vm_, __opts__, default=default_profile,
        search_global=False)
    if not isinstance(kwargs['ex_metadata'], dict):
        raise SaltCloudConfigError('\'metadata\' should be a dict.')

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        raise SaltCloudSystemExit(
            'Error creating {0} on OpenStack\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(vm_['name'], exc))

    vm_['password'] = data.extra.get('password', None)
    return data, vm_
def create(vm_):
    '''
    Create a single VM from a data dict (IBMSCE / libcloud).

    Fires the create lifecycle events, requests the node via libcloud,
    waits for an IP address, optionally runs the salt deploy (or Windows
    installer), and returns a result dict describing the node.

    vm_ -- profile/VM data dict (``name``, ``profile``, ``provider``,
           ``priv_key``, ``pub_key``, ...).
    Returns a dict of node attributes plus ``deploy_kwargs``, or False
    when the provider request fails.
    Raises SaltCloudConfigError / SaltCloudSystemExit on config errors or
    unrecoverable wait failures.
    '''
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file {0!r} does not exist'.format(
                key_filename
            )
        )

    # Without a key file, password auth via sshpass is the only way the
    # deploy scripts can log in.
    if deploy is True and key_filename is None and \
            salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'ssh_key_file\' setting '
            'is not set and \'sshpass\' binary is not present on the '
            'system for the password.'
        )

    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    conn = get_conn()

    vm_['location'] = config.get_cloud_config_value('location', vm_, __opts__)

    kwargs = {
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_),
        'location': get_location(conn, vm_),
        'auth': NodeAuthSSHKey(
            config.get_cloud_config_value('ssh_key_name', vm_, __opts__)
        )
    }

    log.debug(
        'Creating instance on {0} at {1}'.format(
            time.strftime('%Y-%m-%d'), time.strftime('%H:%M:%S')
        )
    )

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {'kwargs': {'name': kwargs['name'],
                    'image': kwargs['image'].name,
                    'size': kwargs['size'].name,
                    'location': kwargs['location'].name}},
    )

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on IBMSCE\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n{1}'.format(
                vm_['name'], exc
            ),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG)
        )
        return False

    def __query_node_data(vm_name, data):
        # Poll callback for wait_for_ip: copies any discovered addresses
        # onto the node object and returns it once the configured
        # ssh_interface has at least one address; None keeps polling.
        nodelist = list_nodes()
        public_ips = nodelist[vm_name]['public_ips']
        private_ips = nodelist[vm_name]['private_ips']
        if private_ips:
            data.private_ips = private_ips
        if public_ips:
            data.public_ips = public_ips
        if ssh_interface(vm_) == 'private_ips' and private_ips:
            return data
        if ssh_interface(vm_) == 'public_ips' and public_ips:
            return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_['name'], data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=25 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=15),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(exc.message)

    ret = {}
    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        log.debug(
            'Deploying {0} using IP address {1}'.format(
                vm_['name'], data.public_ips[0]
            )
        )
        deploy_kwargs = {
            'host': data.public_ips[0],
            'username': '******',
            'provider': 'ibmsce',
            'password': data.extra['password'],
            'key_filename': key_filename,
            'script': deploy_script.script,
            'name': vm_['name'],
            'sudo': True,
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True
            ),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__
            ),
            'script_env': config.get_cloud_config_value('script_env', vm_,
                                                        __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True
        )

        # Check for Windows install params
        win_installer = config.get_cloud_config_value('win_installer', vm_,
                                                      __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator'
            )
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default=''
            )

        # Store what was used to the deploy the VM, scrubbing secrets before
        # the data goes onto the event bus / return payload.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event(
            'event',
            'executing deploy script',
            'salt/cloud/{0}/deploying'.format(vm_['name']),
            {'kwargs': event_kwargs},
        )

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error(
                'Failed to start Salt on Cloud VM {0}'.format(vm_['name'])
            )

    # FIX: `data` is the libcloud Node object, not a mapping — dict.update()
    # on it would fail. Merge its attribute dict instead, matching the
    # pprint.pformat(data.__dict__) below and the other drivers in this file.
    ret.update(data.__dict__)

    # Never leak the root password into returned data / logs.
    if 'password' in data.extra:
        del data.extra['password']

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug(
        '{0[name]!r} VM creation details:\n{1}'.format(
            vm_, pprint.pformat(data.__dict__)
        )
    )

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    return ret
def create(vm_):
    '''
    Create a single VM from a data dict (older OpenStack driver,
    Python 2 style).

    NOTE(review): the visible copy of this function ends inside the nested
    ``__query_node_data`` poller — the outer function never invokes it and
    never returns a value here. This chunk appears truncated (compare the
    parallel implementation of ``create`` elsewhere in this file, which
    follows the poller with a ``salt.utils.cloud.wait_for_ip`` call);
    confirm against the full module before relying on it.
    '''
    deploy = config.get_config_value('deploy', vm_, __opts__)
    key_filename = config.get_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file {0!r} does not exist'.format(
                key_filename
            )
        )

    # Without a key file, password auth via sshpass is the only way the
    # deploy scripts can log in.
    if deploy is True and key_filename is None and \
            salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'ssh_key_file\' setting '
            'is not set and \'sshpass\' binary is not present on the '
            'system for the password.'
        )

    salt.cloud.utils.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    salt.cloud.utils.check_name(vm_['name'], 'a-zA-Z0-9._-')
    conn = get_conn()
    kwargs = {
        'name': vm_['name']
    }

    try:
        kwargs['image'] = get_image(conn, vm_)
    except Exception as exc:
        log.error(
            'Error creating {0} on OPENSTACK\n\n'
            'Could not find image {1}: {2}\n'.format(
                vm_['name'], vm_['image'], exc
            ),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG)
        )
        return False

    try:
        kwargs['size'] = get_size(conn, vm_)
    except Exception as exc:
        log.error(
            'Error creating {0} on OPENSTACK\n\n'
            'Could not find size {1}: {2}\n'.format(
                vm_['name'], vm_['size'], exc
            ),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG)
        )
        return False

    kwargs['ex_keyname'] = config.get_config_value(
        'ssh_key_name', vm_, __opts__, search_global=False
    )

    security_groups = config.get_config_value(
        'security_groups', vm_, __opts__, search_global=False
    )
    if security_groups is not None:
        vm_groups = security_groups.split(',')
        avail_groups = conn.ex_list_security_groups()
        group_list = []

        # Validate every requested group against what the provider exposes.
        for vmg in vm_groups:
            if vmg in [ag.name for ag in avail_groups]:
                group_list.append(vmg)
            else:
                raise SaltCloudNotFound(
                    'No such security group: \'{0}\''.format(vmg)
                )

        # Pass the libcloud group objects, not the bare names.
        kwargs['ex_security_groups'] = [
            g for g in avail_groups if g.name in group_list
        ]

    networks = config.get_config_value(
        'networks', vm_, __opts__, search_global=False
    )

    floating = []
    if HAS014 and networks is not None:
        for net in networks:
            if 'fixed' in net:
                kwargs['networks'] = [
                    OpenStackNetwork(n, None, None, None)
                    for n in net['fixed']
                ]
            elif 'floating' in net:
                pool = OpenStack_1_1_FloatingIpPool(
                    net['floating'], conn.connection
                )
                # Collect only IPs that are not yet attached to a node.
                for idx in pool.list_floating_ips():
                    if idx.node_id is None:
                        floating.append(idx)
                if not floating:
                    # Note(pabelanger): We have no available floating IPs. For
                    # now, we raise an exception and exit. A future enhancement
                    # might be to allow salt-cloud to dynamically allocate new
                    # addresses but that might be tricky to manage.
                    raise SaltCloudSystemExit(
                        "Floating pool '%s' has not more address available, "
                        "please create some more or use a different pool." %
                        net['floating']
                    )

    files = config.get_config_value(
        'files', vm_, __opts__, search_global=False
    )
    if files:
        kwargs['ex_files'] = {}
        for src_path in files:
            with salt.utils.fopen(files[src_path], 'r') as fp_:
                kwargs['ex_files'][src_path] = fp_.read()

    userdata_file = config.get_config_value(
        'userdata_file', vm_, __opts__, search_global=False
    )
    if userdata_file is not None:
        with salt.utils.fopen(userdata_file, 'r') as fp:
            kwargs['ex_userdata'] = fp.read()

    salt.cloud.utils.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {'kwargs': {'name': kwargs['name'],
                    'image': kwargs['image'].name,
                    'size': kwargs['size'].name}},
    )

    kwargs['ex_metadata'] = config.get_config_value(
        'metadata', vm_, __opts__, default={}, search_global=False
    )
    if not isinstance(kwargs['ex_metadata'], dict):
        raise SaltCloudConfigError(
            '\'metadata\' should be a dict.'
        )

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on OpenStack\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(
                vm_['name'], exc
            ),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG)
        )
        return False

    def __query_node_data(vm_, data, floating):
        # Poll callback: False aborts, None keeps polling, `data` stops.
        try:
            nodelist = list_nodes()
            log.debug(
                'Loaded node data for {0}:\n{1}'.format(
                    vm_['name'],
                    pprint.pformat(
                        nodelist[vm_['name']]
                    )
                )
            )
        except Exception, err:
            log.error(
                'Failed to get nodes list: {0}'.format(
                    err
                ),
                # Show the traceback if the debug logging level is enabled
                exc_info=log.isEnabledFor(logging.DEBUG)
            )
            # Trigger a failure in the wait for IP function
            return False

        running = nodelist[vm_['name']]['state'] == node_state(
            NodeState.RUNNING
        )
        if not running:
            # Still not running, trigger another iteration
            return

        if rackconnect(vm_) is True:
            extra = nodelist[vm_['name']].get('extra')
            rc_status = extra.get('metadata').get(
                'rackconnect_automation_status')
            # NOTE(review): access_ip is only bound in this branch but is
            # read again further down when rackconnect(vm_) is True.
            access_ip = extra.get('access_ip')
            if rc_status != 'DEPLOYED':
                log.debug('Waiting for Rackconnect automation to complete')
                return

        if managedcloud(vm_) is True:
            extra = nodelist[vm_['name']].get('extra')
            mc_status = extra.get('metadata').get(
                'rax_service_level_automation')
            if mc_status != 'Complete':
                log.debug('Waiting for managed cloud automation to complete')
                return

        if floating:
            try:
                name = data.name
                ip = floating[0].ip_address
                conn.ex_attach_floating_ip_to_node(data, ip)
                log.info(
                    'Attaching floating IP "{0}" to node "{1}"'.format(ip, name)
                )
            except Exception:
                # Note(pabelanger): Because we loop, we only want to attach the
                # floating IP address one. So, expect failures if the IP is
                # already attached.
                pass

        result = []
        private = nodelist[vm_['name']]['private_ips']
        public = nodelist[vm_['name']]['public_ips']
        if private and not public:
            log.warn(
                'Private IPs returned, but not public... Checking for '
                'misidentified IPs'
            )
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.cloud.utils.is_public_ip(private_ip):
                    log.warn('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                    log.warn(
                        'Public IP address was not ready when we last checked. Appending public IP address now.'
                    )
                    public = data.public_ips
                else:
                    log.warn('{0} is a private IP'.format(private_ip))
                    ignore_ip = ignore_cidr(vm_, private_ip)
                    if private_ip not in data.private_ips and not ignore_ip:
                        result.append(private_ip)

        if rackconnect(vm_) is True:
            if ssh_interface(vm_) != 'private_ips':
                data.public_ips = access_ip
                return data

        if result:
            log.debug('result = {0}'.format(result))
            data.private_ips = result
            if ssh_interface(vm_) == 'private_ips':
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != 'private_ips':
                return data
def create(vm_):
    '''
    Provision a single machine

    Saltify-style provisioner: no instance is requested from any cloud —
    the machine at ``vm_['ssh_host']`` already exists, and this function
    only runs the salt deploy (or Windows installer) against it.

    vm_ -- profile/VM data dict; must supply ``ssh_host``, ``priv_key``
           and ``pub_key``.
    Returns a dict with ``deploy_kwargs`` and ``deployed`` on success, or
    an ``Error`` dict when deploy is disabled or fails.
    Raises SaltCloudConfigError / SaltCloudSystemExit on bad credentials
    configuration.
    '''
    if config.get_cloud_config_value('deploy', vm_, __opts__) is False:
        return {
            'Error': {
                'No Deploy': '\'deploy\' is not enabled. Not deploying.'
            }
        }

    key_filename = config.get_cloud_config_value(
        'key_filename', vm_, __opts__, search_global=False, default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_keyfile {0!r} does not exist'.format(
                key_filename))

    # Without a key file, password auth via sshpass is the only way the
    # deploy scripts can log in.
    if key_filename is None and salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'ssh_keyfile\' setting '
            'is not set and \'sshpass\' binary is not present on the '
            'system for the password.')

    ret = {}

    log.info('Provisioning existing machine {0}'.format(vm_['name']))

    ssh_username = config.get_cloud_config_value('ssh_username', vm_, __opts__)
    deploy_script = script(vm_)
    deploy_kwargs = {
        'opts': __opts__,
        'host': vm_['ssh_host'],
        'username': ssh_username,
        'script': deploy_script,
        'name': vm_['name'],
        'tmp_dir': config.get_cloud_config_value(
            'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
        'deploy_command': config.get_cloud_config_value(
            'deploy_command', vm_, __opts__,
            default='/tmp/.saltcloud/deploy.sh',
        ),
        'start_action': __opts__['start_action'],
        'parallel': __opts__['parallel'],
        'sock_dir': __opts__['sock_dir'],
        'conf_file': __opts__['conf_file'],
        'minion_pem': vm_['priv_key'],
        'minion_pub': vm_['pub_key'],
        'keep_tmp': __opts__['keep_tmp'],
        # Default to sudo for any non-root deploy user.
        'sudo': config.get_cloud_config_value(
            'sudo', vm_, __opts__, default=(ssh_username != 'root')),
        'sudo_password': config.get_cloud_config_value(
            'sudo_password', vm_, __opts__, default=None),
        'tty': config.get_cloud_config_value(
            'tty', vm_, __opts__, default=True),
        'password': config.get_cloud_config_value(
            'password', vm_, __opts__, search_global=False),
        'key_filename': key_filename,
        'script_args': config.get_cloud_config_value(
            'script_args', vm_, __opts__),
        'script_env': config.get_cloud_config_value(
            'script_env', vm_, __opts__),
        'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_),
        'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
        'display_ssh_output': config.get_cloud_config_value(
            'display_ssh_output', vm_, __opts__, default=True)
    }
    if 'ssh_port' in vm_:
        deploy_kwargs.update({'port': vm_['ssh_port']})

    if 'salt_host' in vm_:
        deploy_kwargs.update({'salt_host': vm_['salt_host']})

    # forward any info about possible ssh gateway to deploy script
    # as some providers need also a 'gateway' configuration
    if 'gateway' in vm_:
        deploy_kwargs.update({'gateway': vm_['gateway']})

    # Deploy salt-master files, if necessary
    if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
        deploy_kwargs['make_master'] = True
        deploy_kwargs['master_pub'] = vm_['master_pub']
        deploy_kwargs['master_pem'] = vm_['master_pem']
        master_conf = salt.utils.cloud.master_config(__opts__, vm_)
        deploy_kwargs['master_conf'] = master_conf

        if master_conf.get('syndic_master', None):
            deploy_kwargs['make_syndic'] = True

    deploy_kwargs['make_minion'] = config.get_cloud_config_value(
        'make_minion', vm_, __opts__, default=True)

    win_installer = config.get_cloud_config_value(
        'win_installer', vm_, __opts__)
    if win_installer:
        deploy_kwargs['win_installer'] = win_installer
        minion = salt.utils.cloud.minion_config(__opts__, vm_)
        deploy_kwargs['master'] = minion['master']
        deploy_kwargs['username'] = config.get_cloud_config_value(
            'win_username', vm_, __opts__, default='Administrator')
        deploy_kwargs['password'] = config.get_cloud_config_value(
            'win_password', vm_, __opts__, default='')

    # Store what was used to the deploy the VM, scrubbing secrets before
    # the data goes onto the event bus / return payload.
    event_kwargs = copy.deepcopy(deploy_kwargs)
    del event_kwargs['minion_pem']
    del event_kwargs['minion_pub']
    del event_kwargs['sudo_password']
    if 'password' in event_kwargs:
        del event_kwargs['password']
    ret['deploy_kwargs'] = event_kwargs

    salt.utils.cloud.fire_event(
        'event',
        'executing deploy script',
        'salt/cloud/{0}/deploying'.format(vm_['name']),
        {'kwargs': event_kwargs},
        transport=__opts__['transport'])

    deployed = False
    if win_installer:
        deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
    else:
        deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

    if deployed:
        ret['deployed'] = deployed
        log.info('Salt installed on {0}'.format(vm_['name']))
        return ret

    log.error('Failed to start Salt on host {0}'.format(vm_['name']))
    return {
        'Error': {
            'Not Deployed': 'Failed to start Salt on host {0}'.format(
                vm_['name'])
        }
    }
def create(vm_):
    '''
    Create a single VM from a data dict (AWS via libcloud).

    Requests the node, waits for an address on the configured
    ``ssh_interface``, waits for SSH auth across the candidate usernames,
    optionally runs the salt deploy (or Windows installer), attaches any
    configured volumes, and returns the node data.

    vm_ -- profile/VM data dict (``name``, ``priv_key``, ``pub_key``, ...).
    Returns a dict of node attributes plus ``deploy_kwargs``, or False
    when the provider request fails.
    Raises SaltCloudConfigError / SaltCloudSystemExit on config errors,
    wait timeouts, or SSH authentication failure.
    '''
    key_filename = config.get_cloud_config_value(
        'private_key', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename {0!r} does not exist'.format(
                key_filename
            )
        )

    location = get_location(vm_)
    log.info('Creating Cloud VM {0} in {1}'.format(vm_['name'], location))
    conn = get_conn(location=location)
    usernames = ssh_username(vm_)
    kwargs = {
        'ssh_key': config.get_cloud_config_value(
            'private_key', vm_, __opts__, search_global=False
        ),
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_),
        'location': get_availability_zone(conn, vm_)
    }
    ex_keyname = keyname(vm_)
    if ex_keyname:
        kwargs['ex_keyname'] = ex_keyname
    ex_securitygroup = securitygroup(vm_)
    if ex_securitygroup:
        kwargs['ex_securitygroup'] = ex_securitygroup
    ex_blockdevicemappings = block_device_mappings(vm_)
    if ex_blockdevicemappings:
        kwargs['ex_blockdevicemappings'] = ex_blockdevicemappings

    ex_iam_profile = iam_profile(vm_)
    if ex_iam_profile:
        # libcloud does not implement 'iam_profile' yet.
        # A pull request has been suggested
        # https://github.com/apache/libcloud/pull/150
        raise SaltCloudConfigError(
            'libcloud does not implement \'iam_profile\' yet. '
            'Use EC2 driver instead.'
        )

    tags = config.get_cloud_config_value('tag', vm_, __opts__, {},
                                         search_global=False)
    if not isinstance(tags, dict):
        raise SaltCloudConfigError(
            '\'tag\' should be a dict.'
        )

    kwargs['ex_metadata'] = config.get_cloud_config_value(
        'metadata', vm_, __opts__, default={}, search_global=False)
    if not isinstance(kwargs['ex_metadata'], dict):
        raise SaltCloudConfigError(
            '\'metadata\' should be a dict.'
        )

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on AWS\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(
                vm_['name'], exc
            ),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG)
        )
        return False

    log.info('Created node {0}'.format(vm_['name']))

    def __get_node_data(conn, vm_name):
        # Poll callback for wait_for_ip: False aborts, None keeps polling,
        # the node is returned once the configured interface has addresses.
        data = get_node(conn, vm_name)
        if data is None:
            # Trigger a failure in the waiting function
            return False
        if ssh_interface(vm_) == 'private_ips' and data.private_ips:
            return data
        if ssh_interface(vm_) == 'public_ips' and data.public_ips:
            return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __get_node_data,
            update_args=(conn, vm_['name']),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=5 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=0.5),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(exc.message)

    if tags:
        set_tags(vm_['name'], tags, call='action')

    if ssh_interface(vm_) == 'private_ips':
        log.info('Salt node data. Private_ip: {0}'.format(data.private_ips[0]))
        ip_address = data.private_ips[0]
    else:
        log.info('Salt node data. Public_ip: {0}'.format(data.public_ips[0]))
        ip_address = data.public_ips[0]

    username = '******'
    ssh_connect_timeout = config.get_cloud_config_value(
        'ssh_connect_timeout', vm_, __opts__, 900   # 15 minutes
    )
    if salt.utils.cloud.wait_for_port(ip_address,
                                      timeout=ssh_connect_timeout):
        # Try each candidate username in turn; the for/else raises only
        # when none of them could authenticate.
        for user in usernames:
            if salt.utils.cloud.wait_for_passwd(
                    host=ip_address,
                    username=user,
                    ssh_timeout=config.get_cloud_config_value(
                        'wait_for_passwd_timeout', vm_, __opts__,
                        default=1 * 60),
                    key_filename=key_filename):
                username = user
                break
        else:
            raise SaltCloudSystemExit(
                'Failed to authenticate against remote ssh'
            )

    ret = {}
    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        deploy_kwargs = {
            'host': ip_address,
            'username': username,
            'key_filename': key_filename,
            'tmp_dir': config.get_cloud_config_value(
                'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'
            ),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'tty': config.get_cloud_config_value(
                'tty', vm_, __opts__, default=True
            ),
            'script': deploy_script.script,
            'name': vm_['name'],
            # Default to sudo for any non-root deploy user.
            'sudo': config.get_cloud_config_value(
                'sudo', vm_, __opts__, default=(username != 'root')
            ),
            'sudo_password': config.get_cloud_config_value(
                'sudo_password', vm_, __opts__, default=None
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'conf_file': __opts__['conf_file'],
            'sock_dir': __opts__['sock_dir'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True
            ),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__
            ),
            'script_env': config.get_cloud_config_value('script_env', vm_,
                                                        __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True
        )

        # Check for Windows install params
        win_installer = config.get_cloud_config_value('win_installer', vm_,
                                                      __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator'
            )
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default=''
            )

        # Store what was used to the deploy the VM
        ret['deploy_kwargs'] = deploy_kwargs

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        if deployed:
            log.info('Salt installed on {name}'.format(**vm_))
        else:
            log.error('Failed to start Salt on Cloud VM {name}'.format(**vm_))

    # FIX: `data` is the libcloud Node object, not a mapping — dict.update()
    # on it would fail. Merge its attribute dict instead, matching the
    # pprint.pformat(data.__dict__) below and the other drivers in this file.
    ret.update(data.__dict__)

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug(
        '{0[name]!r} VM creation details:\n{1}'.format(
            vm_, pprint.pformat(data.__dict__)
        )
    )

    volumes = config.get_cloud_config_value(
        'volumes', vm_, __opts__, search_global=True
    )
    if volumes:
        log.info('Create and attach volumes to node {0}'.format(data.name))
        create_attach_volumes(volumes, location, data)

    return ret
def create(vm_):
    '''
    Create a single DigitalOcean droplet from a profile data dict.

    ``vm_`` must carry ``name``, ``profile``, ``provider`` and the minion
    keypair (``priv_key``/``pub_key``).  Returns the node data returned by
    the API (augmented with ``deploy_kwargs`` when Salt was deployed), or
    ``False`` when droplet creation fails.  Raises ``SaltCloudConfigError``
    for invalid configuration and ``SaltCloudSystemExit`` when waiting for
    the droplet's IP address times out.
    '''
    # Announce the start of the creation process on the salt event bus.
    salt.utils.cloud.fire_event('event', 'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport'])

    log.info('Creating Cloud VM {0}'.format(vm_['name']))

    # Base droplet request built from the resolved profile values.
    kwargs = {
        'name': vm_['name'],
        'size': get_size(vm_),
        'image': get_image(vm_),
        'region': get_location(vm_),
    }
    kwargs['ssh_keys'] = []

    # backwards compat: single 'ssh_key_name' option
    ssh_key_name = config.get_cloud_config_value(
        'ssh_key_name', vm_, __opts__, search_global=False)
    if ssh_key_name:
        kwargs['ssh_keys'].append(get_keyid(ssh_key_name))

    # Newer comma-separated 'ssh_key_names' option; each name is resolved
    # to its provider-side key id.
    ssh_key_names = config.get_cloud_config_value(
        'ssh_key_names', vm_, __opts__, search_global=False, default=False)
    if ssh_key_names:
        for key in ssh_key_names.split(','):
            kwargs['ssh_keys'].append(get_keyid(key))

    # A private key file is mandatory for this driver: the error below
    # states the provider returns no root password for new droplets.
    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename {0!r} does not exist'.format(
                key_filename))
    if key_filename is None:
        raise SaltCloudConfigError(
            'The DigitalOcean driver requires an ssh_key_file and an ssh_key_name '
            'because it does not supply a root password upon building the server.')

    # Optional droplet features; each must be an explicit boolean when set.
    private_networking = config.get_cloud_config_value(
        'private_networking', vm_, __opts__, search_global=False,
        default=None,
    )
    if private_networking is not None:
        if not isinstance(private_networking, bool):
            raise SaltCloudConfigError(
                "'private_networking' should be a boolean value.")
        kwargs['private_networking'] = private_networking

    backups_enabled = config.get_cloud_config_value(
        'backups_enabled', vm_, __opts__, search_global=False,
        default=None,
    )
    if backups_enabled is not None:
        if not isinstance(backups_enabled, bool):
            raise SaltCloudConfigError(
                "'backups_enabled' should be a boolean value.")
        # The API parameter name ('backups') differs from the option name.
        kwargs['backups'] = backups_enabled

    ipv6 = config.get_cloud_config_value(
        'ipv6', vm_, __opts__, search_global=False, default=None,
    )
    if ipv6 is not None:
        if not isinstance(ipv6, bool):
            raise SaltCloudConfigError("'ipv6' should be a boolean value.")
        kwargs['ipv6'] = ipv6

    salt.utils.cloud.fire_event('event', 'requesting instance',
        'salt/cloud/{0}/requesting'.format(
            vm_['name']),
        {'kwargs': kwargs},
        transport=__opts__['transport'])

    try:
        ret = create_node(kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on DIGITAL_OCEAN\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: {1}'.format(vm_['name'], str(exc)),
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG)
        return False

    def __query_node_data(vm_name):
        # Polled by wait_for_ip: returning the node data stops the wait,
        # returning False flags the iteration as not-ready-yet/failed.
        data = show_instance(vm_name, 'action')
        if not data:
            # Trigger an error in the wait_for_ip function
            return False
        if data['networks'].get('v4'):
            for network in data['networks']['v4']:
                if network['type'] == 'public':
                    return data
        return False

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_['name'], ),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root')

    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        # Deploy over the droplet's public IPv4 address (last public entry
        # wins if several are listed).
        for network in data['networks']['v4']:
            if network['type'] == 'public':
                ip_address = network['ip_address']

        # Optionally register a DNS record for the new droplet.
        create_record = config.get_cloud_config_value(
            'create_dns_record', vm_, __opts__, search_global=False,
            default=None,
        )
        if create_record is not None:
            if not isinstance(create_record, bool):
                raise SaltCloudConfigError(
                    "'create_dns_record' should be a boolean value.")
            if create_record:
                create_dns_record(vm_['name'], ip_address)

        deploy_kwargs = {
            'opts': __opts__,
            'host': ip_address,
            'username': ssh_username,
            'key_filename': key_filename,
            # NOTE(review): other drivers in this file pass
            # deploy_script.script here — confirm whether deploy_script()
            # expects the script object or its rendered text.
            'script': deploy_script,
            'name': vm_['name'],
            'tmp_dir': config.get_cloud_config_value(
                'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True),
            'sudo': config.get_cloud_config_value(
                'sudo', vm_, __opts__, default=(ssh_username != 'root')),
            'sudo_password': config.get_cloud_config_value(
                'sudo_password', vm_, __opts__, default=None),
            'tty': config.get_cloud_config_value(
                'tty', vm_, __opts__, default=False),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__),
            'script_env': config.get_cloud_config_value(
                'script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf
            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Check for Windows install params
        win_installer = config.get_cloud_config_value(
            'win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator')
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default='')

        # Store what was used to the deploy the VM — with key material and
        # passwords stripped so they never reach the event bus.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event('event', 'executing deploy script',
            'salt/cloud/{0}/deploying'.format(
                vm_['name']),
            {'kwargs': event_kwargs},
            transport=__opts__['transport'])

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)
        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error('Failed to start Salt on Cloud VM {0}'.format(
                vm_['name']))

    # Merge the raw node data into the return payload.
    ret.update(data)

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event('event', 'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport'])

    return ret
def create(vm_):
    '''
    Create a single OpenStack instance from a profile data dict.

    ``vm_`` must carry ``name``, ``profile``, ``provider``, ``image``,
    ``size`` and the minion keypair (``priv_key``/``pub_key``).

    Returns the node's attribute dict (augmented with ``deploy_kwargs``
    when Salt was deployed), or ``False`` when the node could not be
    created.  Raises ``SaltCloudConfigError`` for invalid configuration,
    ``SaltCloudNotFound`` for an unknown security group and
    ``SaltCloudSystemExit`` on fatal provisioning errors.
    '''
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None)
    if key_filename is not None:
        key_filename = os.path.expanduser(key_filename)
        if not os.path.isfile(key_filename):
            raise SaltCloudConfigError(
                'The defined ssh_key_file {0!r} does not exist'.format(
                    key_filename))

    # Without a key file, password-based deploys need the sshpass binary.
    if deploy is True and key_filename is None and \
            salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'ssh_key_file\' setting '
            'is not set and \'sshpass\' binary is not present on the '
            'system for the password.')

    # Announce the start of the creation process on the salt event bus.
    salt.utils.cloud.fire_event('event', 'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport'])

    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-')
    conn = get_conn()
    kwargs = {'name': vm_['name']}

    try:
        kwargs['image'] = get_image(conn, vm_)
    except Exception as exc:
        log.error(
            'Error creating {0} on OPENSTACK\n\n'
            'Could not find image {1}: {2}\n'.format(
                vm_['name'], vm_['image'], exc),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    try:
        kwargs['size'] = get_size(conn, vm_)
    except Exception as exc:
        log.error(
            'Error creating {0} on OPENSTACK\n\n'
            'Could not find size {1}: {2}\n'.format(
                vm_['name'], vm_['size'], exc),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    kwargs['ex_keyname'] = config.get_cloud_config_value(
        'ssh_key_name', vm_, __opts__, search_global=False)

    # Validate every requested security group against what the tenant has.
    security_groups = config.get_cloud_config_value(
        'security_groups', vm_, __opts__, search_global=False)
    if security_groups is not None:
        vm_groups = security_groups.split(',')
        avail_groups = conn.ex_list_security_groups()
        group_list = []
        for vmg in vm_groups:
            if vmg in [ag.name for ag in avail_groups]:
                group_list.append(vmg)
            else:
                raise SaltCloudNotFound(
                    'No such security group: \'{0}\''.format(vmg))
        kwargs['ex_security_groups'] = [
            g for g in avail_groups if g.name in group_list
        ]

    networks = config.get_cloud_config_value(
        'networks', vm_, __opts__, search_global=False)
    floating = []

    if HAS014:  # libcloud >= 0.14: network / floating-IP support
        if networks is not None:
            for net in networks:
                if 'fixed' in net:
                    kwargs['networks'] = [
                        OpenStackNetwork(n, None, None, None)
                        for n in net['fixed']
                    ]
                elif 'floating' in net:
                    # Collect unattached floating IPs from the named pool.
                    pool = OpenStack_1_1_FloatingIpPool(
                        net['floating'], conn.connection)
                    for idx in pool.list_floating_ips():
                        if idx.node_id is None:
                            floating.append(idx)
                    if not floating:
                        # Note(pabelanger): We have no available floating
                        # IPs.  For now, we raise an exception and exit.
                        # A future enhancement might be to allow salt-cloud
                        # to dynamically allocate a new address, but that
                        # might be tricky to manage.
                        raise SaltCloudSystemExit(
                            'Floating pool {0!r} does not have any more '
                            'IPs available, please create some more or '
                            'use a different pool.'.format(net['floating']))
        # otherwise, attempt to obtain list without specifying pool
        # this is the same as 'nova floating-ip-list'
        elif ssh_interface(vm_) != 'private_ips':
            try:
                # This try/except is here because it appears some
                # *cough* Rackspace *cough*
                # OpenStack providers return a 404 Not Found for the
                # floating ip pool URL if there are no pools setup
                pool = OpenStack_1_1_FloatingIpPool('', conn.connection)
                for idx in pool.list_floating_ips():
                    if idx.node_id is None:
                        floating.append(idx)
                if not floating:
                    # Note(pabelanger): We have no available floating IPs.
                    # For now, we raise an exception and exit.
                    # A future enhancement might be to allow salt-cloud to
                    # dynamically allocate new address but that might be
                    # tricky to manage.
                    raise SaltCloudSystemExit(
                        'There are no more floating IP addresses '
                        'available, please create some more')
            except Exception as e:
                if str(e).startswith('404'):
                    pass
                else:
                    raise

    # Files to inject into the instance at boot (path -> local source).
    files = config.get_cloud_config_value(
        'files', vm_, __opts__, search_global=False)
    if files:
        kwargs['ex_files'] = {}
        for src_path in files:
            with salt.utils.fopen(files[src_path], 'r') as fp_:
                kwargs['ex_files'][src_path] = fp_.read()

    userdata_file = config.get_cloud_config_value(
        'userdata_file', vm_, __opts__, search_global=False)
    if userdata_file is not None:
        with salt.utils.fopen(userdata_file, 'r') as fp_:
            kwargs['ex_userdata'] = fp_.read()

    salt.utils.cloud.fire_event('event', 'requesting instance',
        'salt/cloud/{0}/requesting'.format(
            vm_['name']),
        {
            'kwargs': {
                'name': kwargs['name'],
                'image': kwargs['image'].name,
                'size': kwargs['size'].name,
                'profile': vm_['profile']
            }
        },
        transport=__opts__['transport'])

    # Tag the instance with its profile unless explicit metadata is given.
    default_profile = {}
    if 'profile' in vm_ and vm_['profile'] is not None:
        default_profile = {'profile': vm_['profile']}

    kwargs['ex_metadata'] = config.get_cloud_config_value(
        'metadata', vm_, __opts__, default=default_profile,
        search_global=False)
    if not isinstance(kwargs['ex_metadata'], dict):
        raise SaltCloudConfigError('\'metadata\' should be a dict.')

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on OpenStack\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(vm_['name'], exc),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    def __query_node_data(vm_, data, floating):
        # Polled by wait_for_ip: returning the node ends the wait,
        # returning None retries, returning False aborts with an error.
        try:
            nodelist = list_nodes_full()
            log.debug('Loaded node data for {0}:\n{1}'.format(
                vm_['name'], pprint.pformat(nodelist[vm_['name']])))
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(err),
                # Show the traceback if the debug logging level is enabled
                exc_info=log.isEnabledFor(logging.DEBUG))
            # Trigger a failure in the wait for IP function
            return False

        running = nodelist[vm_['name']]['state'] == NodeState.RUNNING
        if not running:
            # Still not running, trigger another iteration
            return

        if rackconnect(vm_) is True:
            check_libcloud_version((0, 14, 0), why='rackconnect: True')
            extra = nodelist[vm_['name']].get('extra')
            rc_status = extra.get('metadata', {}).get(
                'rackconnect_automation_status', '')
            # Remembered for the rackconnect branch further down, once
            # automation has finished.
            access_ip = extra.get('access_ip', '')
            if rc_status != 'DEPLOYED':
                log.debug('Waiting for Rackconnect automation to complete')
                return

        if managedcloud(vm_) is True:
            extra = nodelist[vm_['name']].get('extra')
            mc_status = extra.get('metadata', {}).get(
                'rax_service_level_automation', '')
            if mc_status != 'Complete':
                log.debug('Waiting for managed cloud automation to complete')
                return

        if floating:
            try:
                name = data.name
                ip = floating[0].ip_address
                conn.ex_attach_floating_ip_to_node(data, ip)
                log.info('Attaching floating IP {0!r} to node {1!r}'.format(
                    ip, name))
            except Exception:
                # Note(pabelanger): Because we loop, we only want to attach
                # the floating IP address one. So, expect failures if the
                # IP is already attached.
                pass

        result = []
        private = nodelist[vm_['name']]['private_ips']
        public = nodelist[vm_['name']]['public_ips']
        if private and not public:
            log.warn('Private IPs returned, but not public... Checking for '
                     'misidentified IPs')
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.utils.cloud.is_public_ip(private_ip):
                    log.warn('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                    log.warn(
                        'Public IP address was not ready when we last checked.'
                        ' Appending public IP address now.')
                    public = data.public_ips
                else:
                    log.warn('{0} is a private IP'.format(private_ip))
                    ignore_ip = ignore_cidr(vm_, private_ip)
                    if private_ip not in data.private_ips and not ignore_ip:
                        result.append(private_ip)

        if rackconnect(vm_) is True:
            if ssh_interface(vm_) != 'private_ips':
                # NOTE(review): assigns the access_ip string (not a list)
                # to public_ips — the rackconnect branch below relies on it.
                data.public_ips = access_ip
                return data

        # populate return data with private_ips
        # when ssh_interface is set to private_ips and public_ips exist
        if not result and ssh_interface(vm_) == 'private_ips':
            for private_ip in private:
                ignore_ip = ignore_cidr(vm_, private_ip)
                if private_ip not in data.private_ips and not ignore_ip:
                    result.append(private_ip)

        if result:
            log.debug('result = {0}'.format(result))
            data.private_ips = result
            if ssh_interface(vm_) == 'private_ips':
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != 'private_ips':
                return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data, floating),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            # str(exc) instead of exc.message: BaseException.message is
            # gone in Python 3, and the sibling drivers already use
            # str(exc) here.
            raise SaltCloudSystemExit(str(exc))

    log.debug('VM is now running')
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
        ip_address = data.public_ips
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root')

    deploy_kwargs = {
        'opts': __opts__,
        'host': ip_address,
        'name': vm_['name'],
        'sock_dir': __opts__['sock_dir'],
        'tmp_dir': config.get_cloud_config_value(
            'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
        'deploy_command': config.get_cloud_config_value(
            'deploy_command', vm_, __opts__,
            default='/tmp/.saltcloud/deploy.sh',
        ),
        'start_action': __opts__['start_action'],
        'parallel': __opts__['parallel'],
        'minion_pem': vm_['priv_key'],
        'minion_pub': vm_['pub_key'],
        'keep_tmp': __opts__['keep_tmp'],
        'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
        'sudo': config.get_cloud_config_value(
            'sudo', vm_, __opts__, default=(ssh_username != 'root')),
        'sudo_password': config.get_cloud_config_value(
            'sudo_password', vm_, __opts__, default=None),
        'display_ssh_output': config.get_cloud_config_value(
            'display_ssh_output', vm_, __opts__, default=True),
        'script_args': config.get_cloud_config_value(
            'script_args', vm_, __opts__),
        'script_env': config.get_cloud_config_value(
            'script_env', vm_, __opts__),
        'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
    }

    if ssh_username != 'root':
        deploy_kwargs['username'] = ssh_username
        deploy_kwargs['tty'] = True
    log.debug('Using {0} as SSH username'.format(ssh_username))

    # Prefer key-based auth; fall back to the password some providers
    # report in the node's 'extra' data.
    if key_filename is not None:
        deploy_kwargs['key_filename'] = key_filename
        log.debug('Using {0} as SSH key file'.format(key_filename))
    elif hasattr(data, 'extra') and 'password' in data.extra:
        deploy_kwargs['password'] = data.extra['password']
        log.debug('Logging into SSH using password')

    ret = {}
    sudo = config.get_cloud_config_value(
        'sudo', vm_, __opts__, default=(ssh_username != 'root'))
    if sudo is not None:
        deploy_kwargs['sudo'] = sudo
        log.debug('Running root commands using sudo')

    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        deploy_kwargs['script'] = deploy_script.script

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf
            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Check for Windows install params
        win_installer = config.get_cloud_config_value(
            'win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator')
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default='')

        # Store what was used to the deploy the VM — with key material and
        # passwords stripped so they never reach the event bus.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event('event', 'executing deploy script',
            'salt/cloud/{0}/deploying'.format(
                vm_['name']),
            {'kwargs': event_kwargs},
            transport=__opts__['transport'])

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)
        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error('Failed to deploy and start Salt on Cloud VM {0}'.format(
                vm_['name']))

    # Merge the node's attributes into the return payload, then scrub the
    # provider-reported password before it is logged below.
    ret.update(data.__dict__)
    if hasattr(data, 'extra') and 'password' in data.extra:
        del data.extra['password']

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data.__dict__)))

    salt.utils.cloud.fire_event('event', 'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport'])

    return ret