def create(vm_=None, call=None):
    '''
    Create a single GCE instance from a data dict.

    vm_
        Profile/VM definition dict; must contain at least ``name``,
        ``profile`` and ``provider`` (read by the fired events below).
    call
        Guard: this is a profile-level action, so any truthy ``call``
        (i.e. invocation via -a/--action or -f/--function) is rejected.

    Returns the expanded node dict on success, or ``False`` when the
    libcloud create request fails.
    '''
    if call:
        raise SaltCloudSystemExit(
            'You cannot create an instance with -a or -f.')

    # Announce the start of the create so reactors can hook it.
    # NOTE(review): unlike the other drivers in this file this call does not
    # pass transport=__opts__['transport'] — confirm whether that is
    # intentional or an inconsistency to fix.
    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    conn = get_conn()

    # Resolve every profile setting into libcloud create_node() arguments.
    kwargs = {
        'name': vm_['name'],
        'size': __get_size(conn, vm_),
        'image': __get_image(conn, vm_),
        'location': __get_location(conn, vm_),
        'ex_network': __get_network(conn, vm_),
        'ex_tags': __get_tags(vm_),
        'ex_metadata': __get_metadata(vm_),
    }

    log.info('Creating GCE instance {0} in {1}'.format(
        vm_['name'], kwargs['location'].name))
    log.debug('Create instance kwargs {0}'.format(str(kwargs)))

    # Fire the "requesting" event with only the human-readable names, not
    # the full libcloud objects.
    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {
            'name': vm_['name'],
            'location': kwargs['location'].name,
            'size': kwargs['size'].name,
            'image': kwargs['image'].name,
        },
    )

    try:
        node_data = conn.create_node(**kwargs)  # pylint: disable=W0142
    except Exception as exc:  # pylint: disable=W0703
        # NOTE(review): exc.message is Python-2-only; under Python 3 this
        # itself raises AttributeError.
        log.error(
            'Error creating {0} on GCE\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n{1}'.format(vm_['name'],
                                                       exc.message),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    node_dict = _expand_node(node_data)

    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        ssh_user, ssh_key = __get_ssh_credentials(vm_)
        # Assemble every argument the salt bootstrap deploy needs; most are
        # profile-overridable via config.get_cloud_config_value.
        deploy_kwargs = {
            # assumes the instance already has a public IP at this point —
            # TODO confirm GCE always assigns one before create_node returns
            'host': node_data.public_ips[0],
            'username': ssh_user,
            'key_filename': ssh_key,
            'script': deploy_script.script,
            'name': vm_['name'],
            'tmp_dir': config.get_cloud_config_value(
                'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'start_action': __opts__['start_action'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            # sudo/tty default on only when we are not logging in as root
            'sudo': config.get_cloud_config_value(
                'sudo', vm_, __opts__, default=(ssh_user != 'root')),
            'sudo_password': config.get_cloud_config_value(
                'sudo_password', vm_, __opts__, default=None),
            'tty': config.get_cloud_config_value(
                'tty', vm_, __opts__, default=(ssh_user != 'root')),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__),
            'script_env': config.get_cloud_config_value(
                'script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Store what was used to the deploy the VM
        # (with secrets stripped before publishing on the event bus)
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        node_dict['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event(
            'event',
            'executing deploy script',
            'salt/cloud/{0}/deploying'.format(vm_['name']),
            {'kwargs': deploy_kwargs},
        )

        # pylint: disable=W0142
        deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)
        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error('Failed to start Salt on Cloud VM {0}'.format(
                vm_['name']))

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(node_dict)))

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    return node_dict
def create(vm_):
    '''
    Create a single Aliyun ECS VM from a data dict.

    Fires the standard salt-cloud lifecycle events, requests the node,
    waits for a public IP, bootstraps salt over SSH and returns the
    bootstrap result merged with the node data.  Returns ``False`` if the
    initial create request fails.
    '''
    salt.utils.cloud.fire_event('event', 'starting create',
                                'salt/cloud/{0}/creating'.format(
                                    vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['provider'],
                                },
                                transport=__opts__['transport'])

    log.info('Creating Cloud VM {0}'.format(vm_['name']))

    # Resolve the profile into the provider-API request parameters.
    kwargs = {
        'name': vm_['name'],
        'size_id': get_size(vm_),
        'image_id': get_image(vm_),
        'region_id': __get_location(vm_),
        'securitygroup_id': get_securitygroup(vm_),
    }

    salt.utils.cloud.fire_event('event', 'requesting instance',
                                'salt/cloud/{0}/requesting'.format(
                                    vm_['name']),
                                {'kwargs': kwargs},
                                transport=__opts__['transport'])

    try:
        ret = create_node(kwargs)
    except Exception as exc:
        # NOTE(review): exc.message is Python-2-only.
        log.error(
            'Error creating {0} on Aliyun ECS\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: {1}'.format(vm_['name'],
                                                     exc.message),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    def __query_node_data(vm_name):
        # Poll callback for wait_for_ip: return the instance data once a
        # public address shows up, False on a failed lookup, and None
        # (implicitly) to keep polling.
        data = show_instance(vm_name, call='action')
        if not data:
            # Trigger an error in the wait_for_ip function
            return False
        if data.get('PublicIpAddress', None) is not None:
            return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_['name'], ),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(exc.message)

    public_ip = data['PublicIpAddress'][0]
    log.debug('VM {0} is now running'.format(public_ip))
    vm_['ssh_host'] = public_ip

    # The instance is booted and accessible, let's Salt it!
    ret = salt.utils.cloud.bootstrap(vm_, __opts__)
    # NOTE(review): `data` here is whatever __query_node_data returned,
    # which looks like a dict from show_instance(); a dict has no useful
    # __dict__ and this line would raise AttributeError.  Presumably this
    # should be ret.update(data) — verify against show_instance().
    ret.update(data.__dict__)

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event('event', 'created instance',
                                'salt/cloud/{0}/created'.format(
                                    vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['provider'],
                                },
                                transport=__opts__['transport'])

    return ret
def create(vm_):
    '''
    Create a single SoftLayer bare-metal server from a data dict.

    Places a SoftLayer_Product_Order for a hardware server, waits for the
    primary IP and root password to become available, optionally runs the
    salt deploy (Linux or Windows), and returns the order response merged
    with the deploy information.  Returns ``False`` if placing the order
    fails.
    '''
    salt.utils.cloud.fire_event('event', 'starting create',
                                'salt/cloud/{0}/creating'.format(
                                    vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['provider'],
                                },
                                transport=__opts__['transport'])

    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    conn = get_conn(service='SoftLayer_Product_Order')
    kwargs = {
        'complexType': 'SoftLayer_Container_Product_Order_Hardware_Server',
        'quantity': 1,
        'hardware': [{
            'hostname': vm_['name'],
            'domain': vm_['domain'],
        }],
        'packageId': 50,  # Baremetal Package
        'prices': [
            # Size Ex: 1921: 2 x 2.0 GHz Core Bare Metal Instance - 2 GB Ram
            {'id': vm_['size']},
            # HDD Ex: 19: 250GB SATA II
            {'id': vm_['hdd']},
            # Image Ex: 13963: CentOS 6.0 - Minimal Install (64 bit)
            {'id': vm_['image']},

            # The following items are currently required
            # Reboot / Remote Console
            {'id': '905'},
            # 1 IP Address
            {'id': '21'},
            # Host Ping Monitoring
            {'id': '55'},
            # Email and Ticket Notifications
            {'id': '57'},
            # Automated Notification Response
            {'id': '58'},
            # Unlimited SSL VPN Users & 1 PPTP VPN User per account
            {'id': '420'},
            # Nessus Vulnerability Assessment & Reporting
            {'id': '418'},
        ],
    }

    # BUGFIX: the default used to be True, so any profile without an
    # explicit optional_products list crashed below with
    # "TypeError: 'bool' object is not iterable".  The default must be an
    # empty list of price IDs.
    optional_products = config.get_cloud_config_value(
        'optional_products', vm_, __opts__, default=[])
    for product in optional_products:
        kwargs['prices'].append({'id': product})

    # Default is 273 (100 Mbps Public & Private Networks)
    port_speed = config.get_cloud_config_value(
        'port_speed', vm_, __opts__, default=273)
    kwargs['prices'].append({'id': port_speed})

    # Default is 248 (5000 GB Bandwidth)
    bandwidth = config.get_cloud_config_value(
        'bandwidth', vm_, __opts__, default=248)
    kwargs['prices'].append({'id': bandwidth})

    vlan_id = config.get_cloud_config_value(
        'vlan', vm_, __opts__, default=False)
    if vlan_id:
        kwargs['primaryNetworkComponent'] = {
            'networkVlan': {
                'id': vlan_id,
            }
        }

    location = get_location(vm_)
    if location:
        kwargs['location'] = location

    salt.utils.cloud.fire_event('event', 'requesting instance',
                                'salt/cloud/{0}/requesting'.format(
                                    vm_['name']),
                                {'kwargs': kwargs},
                                transport=__opts__['transport'])

    try:
        response = conn.placeOrder(kwargs)
        # Leaving the following line in, commented, for easy debugging
        #response = conn.verifyOrder(kwargs)
    except Exception as exc:
        # NOTE(review): exc.message is Python-2-only.
        log.error(
            'Error creating {0} on SoftLayer\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n{1}'.format(vm_['name'],
                                                       exc.message),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    def wait_for_ip():
        '''
        Wait for the IP address to become available
        '''
        nodes = list_nodes_full()
        if 'primaryIpAddress' in nodes[vm_['name']]:
            return nodes[vm_['name']]['primaryIpAddress']
        time.sleep(1)
        return False

    ip_address = salt.utils.cloud.wait_for_fun(
        wait_for_ip,
        timeout=config.get_cloud_config_value(
            'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
    )

    ssh_connect_timeout = config.get_cloud_config_value(
        'ssh_connect_timeout', vm_, __opts__, 900   # 15 minutes
    )
    if not salt.utils.cloud.wait_for_port(ip_address,
                                          timeout=ssh_connect_timeout):
        raise SaltCloudSystemExit(
            'Failed to authenticate against remote ssh')

    pass_conn = get_conn(service='SoftLayer_Account')
    # Object mask limiting the account query to the password data we need.
    mask = {
        'virtualGuests': {
            'powerState': '',
            'operatingSystem': {
                'passwords': ''
            },
        },
    }

    def get_passwd():
        '''
        Wait for the password to become available
        '''
        node_info = pass_conn.getVirtualGuests(id=response['id'], mask=mask)
        for node in node_info:
            if node['id'] == response['id']:
                if 'passwords' in node['operatingSystem'] and len(
                        node['operatingSystem']['passwords']) > 0:
                    return node['operatingSystem']['passwords'][0]['password']
        time.sleep(5)
        return False

    passwd = salt.utils.cloud.wait_for_fun(
        get_passwd,
        timeout=config.get_cloud_config_value(
            'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
    )
    response['password'] = passwd
    response['public_ip'] = ip_address

    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root')

    ret = {}
    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        deploy_kwargs = {
            'opts': __opts__,
            'host': ip_address,
            'username': ssh_username,
            'password': passwd,
            'script': deploy_script.script,
            'name': vm_['name'],
            'tmp_dir': config.get_cloud_config_value(
                'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            # sudo defaults on only when not logging in as root
            'sudo': config.get_cloud_config_value(
                'sudo', vm_, __opts__, default=(ssh_username != 'root')),
            'sudo_password': config.get_cloud_config_value(
                'sudo_password', vm_, __opts__, default=None),
            'tty': config.get_cloud_config_value(
                'tty', vm_, __opts__, default=False),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__),
            'script_env': config.get_cloud_config_value(
                'script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Check for Windows install params
        win_installer = config.get_cloud_config_value(
            'win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator')
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default='')

        # Store what was used to the deploy the VM
        # (secrets stripped before publishing on the event bus)
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event('event', 'executing deploy script',
                                    'salt/cloud/{0}/deploying'.format(
                                        vm_['name']),
                                    {'kwargs': event_kwargs},
                                    transport=__opts__['transport'])

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error('Failed to start Salt on Cloud VM {0}'.format(
                vm_['name']))

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(response)))

    ret.update(response)

    salt.utils.cloud.fire_event('event', 'created instance',
                                'salt/cloud/{0}/created'.format(
                                    vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['provider'],
                                },
                                transport=__opts__['transport'])

    return ret
def _salt(fun, *args, **kw):
    '''Execute a salt function on a specific minion

    Special kwargs:

            salt_target
                target to exec things on
            salt_timeout
                timeout for jobs
            salt_job_poll
                poll interval to wait for job finish result
    '''
    # Pop the control kwargs so they are not forwarded to the remote call.
    try:
        poll = kw.pop('salt_job_poll')
    except KeyError:
        poll = 0.1
    try:
        target = kw.pop('salt_target')
    except KeyError:
        target = None
    try:
        timeout = int(kw.pop('salt_timeout'))
    except (KeyError, ValueError):
        # try to has some low timeouts for very basic commands
        timeout = __FUN_TIMEOUT.get(
            fun,
            900  # wait up to 15 minutes for the default timeout
        )
    try:
        kwargs = kw.pop('kwargs')
    except KeyError:
        kwargs = {}
    if not target:
        # Fall back to the provider-configured target; bail out silently
        # (returns None) when no provider is configured.
        infos = get_configured_provider()
        if not infos:
            return
        target = infos['target']
    laps = time.time()
    cache = False
    if fun in __CACHED_FUNS:
        cache = True
        # Bucket the timestamp so cached results expire after the
        # per-function interval in __CACHED_FUNS.
        laps = laps // __CACHED_FUNS[fun]
    # JSON-serialize the arguments for use in the cache key; fall back to
    # '' for unserializable values (so such calls share a key slot).
    try:
        sargs = json.dumps(args)
    except TypeError:
        sargs = ''
    try:
        skw = json.dumps(kw)
    except TypeError:
        skw = ''
    try:
        skwargs = json.dumps(kwargs)
    except TypeError:
        skwargs = ''
    cache_key = (laps, target, fun, sargs, skw, skwargs)
    if not cache or (cache and (cache_key not in __CACHED_CALLS)):
        conn = _client()
        runner = _runner()
        rkwargs = kwargs.copy()
        rkwargs['timeout'] = timeout
        rkwargs.setdefault('expr_form', 'list')
        kwargs.setdefault('expr_form', 'list')
        ping_retries = 0
        # the target(s) have environ one minute to respond
        # we call 60 ping request, this prevent us
        # from blindly send commands to unmatched minions
        ping_max_retries = 60
        ping = True
        # do not check ping... if we are pinguing
        if fun == 'test.ping':
            ping_retries = ping_max_retries + 1
        # be sure that the executors are alive
        while ping_retries <= ping_max_retries:
            try:
                if ping_retries > 0:
                    time.sleep(1)
                pings = conn.cmd(tgt=target, timeout=10, fun='test.ping')
                values = pings.values()
                if not values:
                    ping = False
                for v in values:
                    if v is not True:
                        ping = False
                if not ping:
                    raise ValueError('Unreachable')
                break
            except Exception:
                ping = False
                ping_retries += 1
                log.error('{0} unreachable, retrying'.format(target))
        if not ping:
            raise SaltCloudSystemExit(
                'Target {0} unreachable'.format(target))
        # Fire the job asynchronously, then poll saltutil.find_job until it
        # finishes or the timeout elapses.
        jid = conn.cmd_async(tgt=target,
                             fun=fun,
                             arg=args,
                             kwarg=kw,
                             **rkwargs)
        cret = conn.cmd(tgt=target,
                        fun='saltutil.find_job',
                        arg=[jid],
                        timeout=10,
                        **kwargs)
        running = bool(cret.get(target, False))
        endto = time.time() + timeout
        while running:
            rkwargs = {
                'tgt': target,
                'fun': 'saltutil.find_job',
                'arg': [jid],
                'timeout': 10
            }
            cret = conn.cmd(**rkwargs)
            running = bool(cret.get(target, False))
            if not running:
                break
            if running and (time.time() > endto):
                raise Exception('Timeout {0}s for {1} is elapsed'.format(
                    timeout, pformat(rkwargs)))
            time.sleep(poll)
        # timeout for the master to return data about a specific job
        wait_for_res = float({
            'test.ping': '5',
        }.get(fun, '120'))
        # NOTE(review): if the job result never appears for a fun other
        # than test.ping, this loop exits with `ret` unbound and the
        # `return ret` below raises UnboundLocalError — verify intended.
        while wait_for_res:
            wait_for_res -= 0.5
            cret = runner.cmd(
                'jobs.lookup_jid',
                [jid, {'__kwarg__': True}])
            if target in cret:
                ret = cret[target]
                break
            # special case, some answers may be crafted
            # to handle the unresponsivness of a specific command
            # which is also meaningful, e.g. a minion not yet provisioned
            if fun in ['test.ping'] and not wait_for_res:
                ret = {
                    'test.ping': False,
                }.get(fun, False)
            time.sleep(0.5)
        # A string result containing 'is not available.' means the remote
        # module/function is missing; TypeError covers non-string results.
        try:
            if 'is not available.' in ret:
                raise SaltCloudSystemExit(
                    'module/function {0} is not available'.format(fun))
        except SaltCloudSystemExit:
            raise
        except TypeError:
            pass
        if cache:
            __CACHED_CALLS[cache_key] = ret
    elif cache and cache_key in __CACHED_CALLS:
        ret = __CACHED_CALLS[cache_key]
    return ret
def create(vm_):
    '''
    Create a single VM from a data dict.

    Nova/Rackspace-style driver: validates SSH credential settings,
    requests (or adopts, via ``instance_id``) an instance, waits for a
    usable IP address — honoring rackconnect/managedcloud automation —
    then bootstraps salt and returns the bootstrap result merged with the
    node data.
    '''
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file {0!r} does not exist'.format(
                key_filename))

    # Without a key file we need sshpass to drive password auth.
    if deploy is True and key_filename is None and \
            salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'ssh_key_file\' setting '
            'is not set and \'sshpass\' binary is not present on the '
            'system for the password.')

    vm_['key_filename'] = key_filename

    salt.utils.cloud.fire_event('event', 'starting create',
                                'salt/cloud/{0}/creating'.format(
                                    vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['provider'],
                                },
                                transport=__opts__['transport'])

    conn = get_conn()

    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for {0[name]!r}'.format(vm_))
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value(
                    'keysize', vm_, __opts__))
        data = conn.server_show_libcloud(vm_['instance_id'])
        if vm_['key_filename'] is None and \
                'change_password' in __opts__ and \
                __opts__['change_password'] is True:
            vm_['password'] = sup.secure_password()
            conn.root_password(vm_['instance_id'], vm_['password'])
    else:
        # Put together all of the information required to request the
        # instance, and then fire off the request for it
        data, vm_ = request_instance(vm_)

        # Pull the instance ID, valid for both spot and normal instances
        vm_['instance_id'] = data.id

    def __query_node_data(vm_, data):
        # Poll callback for wait_for_ip: returns `data` (with its IP lists
        # updated) once a usable address is present, False on a failed
        # node lookup, and None (implicitly) to keep polling.
        try:
            node = show_instance(vm_['name'], 'action')
            log.debug('Loaded node data for {0}:\n{1}'.format(
                vm_['name'], pprint.pformat(node)))
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(err),
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG)
            # Trigger a failure in the wait for IP function
            return False
        running = node['state'] == 'ACTIVE'
        if not running:
            # Still not running, trigger another iteration
            return

        if rackconnect(vm_) is True:
            extra = node.get('extra', {})
            rc_status = extra.get('metadata', {}).get(
                'rackconnect_automation_status', '')
            access_ip = extra.get('access_ip', '')

            if rc_status != 'DEPLOYED':
                log.debug('Waiting for Rackconnect automation to complete')
                return

        if managedcloud(vm_) is True:
            extra = conn.server_show_libcloud(node['id']).extra
            mc_status = extra.get('metadata', {}).get(
                'rax_service_level_automation', '')

            if mc_status != 'Complete':
                log.debug('Waiting for managed cloud automation to '
                          'complete')
                return

        result = []

        if 'private_ips' not in node and 'public_ips' not in node and \
                'access_ip' in node.get('extra', {}):
            result = [node['extra']['access_ip']]

        private = node.get('private_ips', [])
        public = node.get('public_ips', [])

        if private and not public:
            log.warn('Private IPs returned, but not public... Checking for '
                     'misidentified IPs')
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.utils.cloud.is_public_ip(private_ip):
                    log.warn('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                    log.warn(('Public IP address was not ready when we last'
                              ' checked. Appending public IP address now.'))
                    public = data.public_ips
                else:
                    log.warn('{0} is a private IP'.format(private_ip))
                    ignore_ip = ignore_cidr(vm_, private_ip)
                    if private_ip not in data.private_ips and not ignore_ip:
                        result.append(private_ip)

        # NOTE(review): `access_ip` is only bound inside the rackconnect
        # branch above; if rackconnect(vm_) flips True between checks this
        # would raise NameError.  Also a single string is assigned where
        # public_ips otherwise holds a list — verify intended.
        if rackconnect(vm_) is True:
            if ssh_interface(vm_) != 'private_ips':
                data.public_ips = access_ip
                return data

        if result:
            log.debug('result = {0}'.format(result))
            data.private_ips = result
            if ssh_interface(vm_) == 'private_ips':
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != 'private_ips':
                return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(exc.message)

    log.debug('VM is now running')

    # Pick the address to SSH to, honoring the configured ssh_interface
    # and the rackconnect special case.
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
        ip_address = data.public_ips
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    vm_['ssh_host'] = ip_address

    ret = salt.utils.cloud.bootstrap(vm_, __opts__)
    ret.update(data.__dict__)

    # Never expose the root password in the returned/cached node data.
    if 'password' in ret['extra']:
        del ret['extra']['password']

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event('event', 'created instance',
                                'salt/cloud/{0}/created'.format(
                                    vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['provider'],
                                },
                                transport=__opts__['transport'])

    return ret
def request_instance(vm_=None, call=None):
    '''
    Put together all of the information necessary to request an instance
    on Openstack and then fire off the request the instance.

    Returns data about the instance
    '''
    if call == 'function':
        # Technically this function may be called other ways too, but it
        # definitely cannot be called with --function.
        raise SaltCloudSystemExit(
            'The request_instance action must be called with -a or --action.'
        )
    salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-')
    conn = get_conn()
    kwargs = {
        'name': vm_['name']
    }

    try:
        kwargs['image'] = get_image(conn, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit(
            'Error creating {0} on OPENSTACK\n\n'
            'Could not find image {1}: {2}\n'.format(
                vm_['name'], vm_['image'], exc
            )
        )

    try:
        kwargs['size'] = get_size(conn, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit(
            'Error creating {0} on OPENSTACK\n\n'
            'Could not find size {1}: {2}\n'.format(
                vm_['name'], vm_['size'], exc
            )
        )

    # Note: This currently requires libcloud trunk
    avz = config.get_cloud_config_value(
        'availability_zone', vm_, __opts__, default=None, search_global=False
    )
    if avz is not None:
        kwargs['ex_availability_zone'] = avz

    kwargs['ex_keyname'] = config.get_cloud_config_value(
        'ssh_key_name', vm_, __opts__, search_global=False
    )

    # Validate each configured security group against the groups that
    # actually exist on the provider before requesting the node.
    security_groups = config.get_cloud_config_value(
        'security_groups', vm_, __opts__, search_global=False
    )
    if security_groups is not None:
        vm_groups = security_groups.split(',')
        avail_groups = conn.ex_list_security_groups()
        group_list = []

        for vmg in vm_groups:
            if vmg in [ag.name for ag in avail_groups]:
                group_list.append(vmg)
            else:
                raise SaltCloudNotFound(
                    'No such security group: \'{0}\''.format(vmg)
                )

        kwargs['ex_security_groups'] = [
            g for g in avail_groups if g.name in group_list
        ]

    networks = config.get_cloud_config_value(
        'networks', vm_, __opts__, search_global=False
    )

    # Collect currently-unassigned floating IPs (node_id is None) either
    # from the configured pools or, lacking config, from the default pool.
    floating = []

    if HAS014:
        if networks is not None:
            for net in networks:
                if 'fixed' in net:
                    kwargs['networks'] = [
                        OpenStackNetwork(n, None, None, None)
                        for n in net['fixed']
                    ]
                elif 'floating' in net:
                    pool = OpenStack_1_1_FloatingIpPool(
                        net['floating'], conn.connection
                    )
                    for idx in pool.list_floating_ips():
                        if idx.node_id is None:
                            floating.append(idx)
                    if not floating:
                        # Note(pabelanger): We have no available floating
                        # IPs.  For now, we raise an exception and exit.
                        # A future enhancement might be to allow salt-cloud
                        # to dynamically allocate new addresses.
                        raise SaltCloudSystemExit(
                            'Floating pool {0!r} does not have any more '
                            'please create some more or use a different '
                            'pool.'.format(net['floating'])
                        )
        # otherwise, attempt to obtain list without specifying pool
        # this is the same as 'nova floating-ip-list'
        elif ssh_interface(vm_) != 'private_ips':
            try:
                # This try/except is here because it appears some
                # *cough* Rackspace *cough*
                # OpenStack providers return a 404 Not Found for the
                # floating ip pool URL if there are no pools setup
                pool = OpenStack_1_1_FloatingIpPool(
                    '', conn.connection
                )
                for idx in pool.list_floating_ips():
                    if idx.node_id is None:
                        floating.append(idx)
                if not floating:
                    # Note(pabelanger): We have no available floating IPs.
                    # For now, we raise an exception and exit.
                    # A future enhancement might be to allow salt-cloud to
                    # dynamically allocate new address but that might be
                    # tricky to manage.
                    raise SaltCloudSystemExit(
                        'There are no more floating IP addresses '
                        'available, please create some more'
                    )
            except Exception as e:
                # Swallow only the 404 described above; anything else is a
                # real error (including our own SaltCloudSystemExit).
                if str(e).startswith('404'):
                    pass
                else:
                    raise
    vm_['floating'] = floating

    # Upload any configured files into the instance at create time.
    files = config.get_cloud_config_value(
        'files', vm_, __opts__, search_global=False
    )
    if files:
        kwargs['ex_files'] = {}
        for src_path in files:
            with salt.utils.fopen(files[src_path], 'r') as fp_:
                kwargs['ex_files'][src_path] = fp_.read()

    userdata_file = config.get_cloud_config_value(
        'userdata_file', vm_, __opts__, search_global=False
    )

    if userdata_file is not None:
        with salt.utils.fopen(userdata_file, 'r') as fp:
            kwargs['ex_userdata'] = fp.read()

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {'kwargs': {'name': kwargs['name'],
                    'image': kwargs['image'].name,
                    'size': kwargs['size'].name,
                    'profile': vm_['profile']}},
        transport=__opts__['transport']
    )

    # Default the instance metadata to the profile name when available.
    default_profile = {}
    if 'profile' in vm_ and vm_['profile'] is not None:
        default_profile = {'profile': vm_['profile']}

    kwargs['ex_metadata'] = config.get_cloud_config_value(
        'metadata', vm_, __opts__, default=default_profile,
        search_global=False
    )
    if not isinstance(kwargs['ex_metadata'], dict):
        raise SaltCloudConfigError('\'metadata\' should be a dict.')

    try:
        data = conn.create_node(**kwargs)
        return data, vm_
    except Exception as exc:
        raise SaltCloudSystemExit(
            'Error creating {0} on OpenStack\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(
                vm_['name'], exc
            )
        )
def create(vm_):
    '''
    Create a single VM on an OpenStack (Nova) provider from a data dict.

    vm_
        The VM/profile data dict (name, profile, provider, image, size, ...).

    Returns a dict describing the created node, ``False`` when node creation
    fails, or raises ``SaltCloudSystemExit`` on fatal configuration,
    timeout or IP-lookup problems.
    '''
    # Resolve deploy settings first so misconfiguration fails before any
    # cloud API calls are made.
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value('ssh_key_file', vm_, __opts__, search_global=False, default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file {0!r} does not exist'.format(
                key_filename))

    # Without a key file, deployment falls back to password auth, which
    # requires the external 'sshpass' binary.
    if deploy is True and key_filename is None and \
            salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'ssh_key_file\' setting '
            'is not set and \'sshpass\' binary is not present on the '
            'system for the password.')

    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-')
    conn = get_conn()
    kwargs = {'name': vm_['name']}

    # Image and size lookups may raise for unknown names; log and bail out
    # rather than propagate.
    try:
        kwargs['image'] = get_image(conn, vm_)
    except Exception as exc:
        log.error(
            'Error creating {0} on OPENSTACK\n\n'
            'Could not find image {1}: {2}\n'.format(vm_['name'],
                                                     vm_['image'],
                                                     exc),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    try:
        kwargs['size'] = get_size(conn, vm_)
    except Exception as exc:
        log.error(
            'Error creating {0} on OPENSTACK\n\n'
            'Could not find size {1}: {2}\n'.format(vm_['name'],
                                                    vm_['size'],
                                                    exc),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    kwargs['ex_keyname'] = config.get_cloud_config_value('ssh_key_name', vm_, __opts__, search_global=False)

    # 'security_groups' is a comma-separated list; every requested group must
    # exist on the provider or we abort.
    security_groups = config.get_cloud_config_value('security_groups', vm_, __opts__, search_global=False)
    if security_groups is not None:
        vm_groups = security_groups.split(',')
        avail_groups = conn.ex_list_security_groups()
        group_list = []

        for vmg in vm_groups:
            if vmg in [ag.name for ag in avail_groups]:
                group_list.append(vmg)
            else:
                raise SaltCloudNotFound(
                    'No such security group: \'{0}\''.format(vmg))

        kwargs['ex_security_groups'] = [
            g for g in avail_groups if g.name in group_list
        ]

    # Network setup: 'fixed' entries become OpenStackNetwork objects,
    # 'floating' entries reserve an unattached floating IP from the pool.
    # HAS014 presumably gates on a libcloud version providing these classes
    # -- confirm against the module's import block.
    networks = config.get_cloud_config_value('networks', vm_, __opts__, search_global=False)
    floating = []

    if HAS014 and networks is not None:
        for net in networks:
            if 'fixed' in net:
                kwargs['networks'] = [
                    OpenStackNetwork(n, None, None, None)
                    for n in net['fixed']
                ]
            elif 'floating' in net:
                pool = OpenStack_1_1_FloatingIpPool(net['floating'],
                                                    conn.connection)
                for idx in pool.list_floating_ips():
                    if idx.node_id is None:
                        floating.append(idx)
                if not floating:
                    # Note(pabelanger): We have no available floating IPs. For
                    # now, we raise an execption and exit. A future enhancement
                    # might be to allow salt-cloud to dynamically allociate new
                    # address but that might be tricky to manage.
                    raise SaltCloudSystemExit(
                        "Floating pool '%s' has not more address available, "
                        "please create some more or use a different pool."
                        % net['floating'])

    userdata_file = config.get_cloud_config_value('userdata_file', vm_, __opts__, search_global=False)
    if userdata_file is not None:
        with salt.utils.fopen(userdata_file, 'r') as fp:
            kwargs['ex_userdata'] = fp.read()

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {
            'kwargs': {
                'name': kwargs['name'],
                'image': kwargs['image'].name,
                'size': kwargs['size'].name
            }
        },
    )

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on OPENSTACK\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(vm_['name'], exc),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    def __query_node_data(vm_, data, floating):
        '''
        Poll callback for wait_for_ip: returns the node once it is running
        and has a usable IP, ``False`` on a hard failure, or ``None`` to keep
        polling.
        '''
        try:
            nodelist = list_nodes()
            log.debug('Loaded node data for {0}:\n{1}'.format(
                vm_['name'], pprint.pformat(nodelist[vm_['name']])))
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(err),
                # Show the traceback if the debug logging level is enabled
                exc_info=log.isEnabledFor(logging.DEBUG))
            # Trigger a failure in the wait for IP function
            return False

        running = nodelist[vm_['name']]['state'] == node_state(
            NodeState.RUNNING)
        if not running:
            # Still not running, trigger another iteration
            return

        # Rackspace-specific automation gates: wait until RackConnect and/or
        # managed-cloud post-provisioning finish before handing the node out.
        if rackconnect(vm_) is True:
            extra = nodelist[vm_['name']].get('extra')
            rc_status = extra.get('metadata').get(
                'rackconnect_automation_status')
            access_ip = extra.get('access_ip')
            if rc_status != 'DEPLOYED':
                log.debug('Waiting for Rackconnect automation to complete')
                return

        if managedcloud(vm_) is True:
            extra = nodelist[vm_['name']].get('extra')
            mc_status = extra.get('metadata').get(
                'rax_service_level_automation')
            if mc_status != 'Complete':
                log.debug('Waiting for managed cloud automation to complete')
                return

        if floating:
            try:
                name = data.name
                ip = floating[0].ip_address
                conn.ex_attach_floating_ip_to_node(data, ip)
                log.info('Attaching floating IP "{0}" to node "{1}"'.format(
                    ip, name))
            except Exception as e:
                # Note(pabelanger): Because we loop, we only want to attach the
                # floating IP address one. So, expect failures if the IP is
                # already attached.
                pass

        result = []
        private = nodelist[vm_['name']]['private_ips']
        public = nodelist[vm_['name']]['public_ips']
        if private and not public:
            # Some providers misreport public IPs as private; reclassify.
            log.warn('Private IPs returned, but not public... Checking for '
                     'misidentified IPs')
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.utils.cloud.is_public_ip(private_ip):
                    log.warn('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                    log.warn(
                        'Public IP address was not ready when we last checked. Appending public IP address now.'
                    )
                    public = data.public_ips
                else:
                    log.warn('{0} is a private IP'.format(private_ip))
                    ignore_ip = ignore_cidr(vm_, private_ip)
                    if private_ip not in data.private_ips and not ignore_ip:
                        result.append(private_ip)

        if rackconnect(vm_) is True:
            if ssh_interface(vm_) != 'private_ips':
                # NOTE(review): access_ip is a single address assigned to the
                # list-typed public_ips -- looks intentional for RackConnect,
                # but confirm downstream consumers accept it.
                data.public_ips = access_ip
                return data

        if result:
            log.debug('result = {0}'.format(result))
            data.private_ips = result
            if ssh_interface(vm_) == 'private_ips':
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != 'private_ips':
                return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data, floating),
            timeout=config.get_cloud_config_value('wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value('wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(exc.message)

    log.debug('VM is now running')

    # Pick the address used for the SSH deployment.
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
        ip_address = data.public_ips
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    ssh_username = config.get_cloud_config_value('ssh_username', vm_, __opts__, default='root')

    # Arguments handed to salt.utils.cloud.deploy_script; built up front so
    # the username/key/password branches below can amend it.
    deploy_kwargs = {
        'host': ip_address,
        'name': vm_['name'],
        'sock_dir': __opts__['sock_dir'],
        'tmp_dir': config.get_cloud_config_value('tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
        'deploy_command': config.get_cloud_config_value(
            'deploy_command', vm_, __opts__,
            default='/tmp/.saltcloud/deploy.sh',
        ),
        'start_action': __opts__['start_action'],
        'parallel': __opts__['parallel'],
        'minion_pem': vm_['priv_key'],
        'minion_pub': vm_['pub_key'],
        'keep_tmp': __opts__['keep_tmp'],
        'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
        'sudo': config.get_cloud_config_value('sudo', vm_, __opts__, default=(ssh_username != 'root')),
        'sudo_password': config.get_cloud_config_value('sudo_password', vm_, __opts__, default=None),
        'tty': config.get_cloud_config_value('tty', vm_, __opts__, default=False),
        'display_ssh_output': config.get_cloud_config_value('display_ssh_output', vm_, __opts__, default=True),
        'script_args': config.get_cloud_config_value('script_args', vm_, __opts__),
        'script_env': config.get_cloud_config_value('script_env', vm_, __opts__),
        'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
    }

    if ssh_username != 'root':
        deploy_kwargs['username'] = ssh_username
        deploy_kwargs['tty'] = True
    log.debug('Using {0} as SSH username'.format(ssh_username))

    # Prefer key auth; fall back to the provider-generated admin password.
    if key_filename is not None:
        deploy_kwargs['key_filename'] = key_filename
        log.debug('Using {0} as SSH key file'.format(key_filename))
    elif 'password' in data.extra:
        deploy_kwargs['password'] = data.extra['password']
        log.debug('Logging into SSH using password')

    ret = {}
    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        deploy_kwargs['script'] = deploy_script.script

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Check for Windows install params
        win_installer = config.get_cloud_config_value('win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator')
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default='')

        # Store what was used to the deploy the VM, with secrets stripped
        # from the copy that is returned and fired on the event bus.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event(
            'event',
            'executing deploy script',
            'salt/cloud/{0}/deploying'.format(vm_['name']),
            {'kwargs': event_kwargs},
        )

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)
        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error('Failed to deploy and start Salt on Cloud VM {0}'.format(
                vm_['name']))

    ret.update(data.__dict__)

    # Do not leak the admin password in the returned/logged node details.
    if 'password' in data.extra:
        del data.extra['password']

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data.__dict__)))

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    return ret
def list_hosted_services(conn=None, call=None):
    '''
    List VMs on this Azure account, with full information.

    conn
        An existing Azure service-management connection; one is created via
        ``get_conn()`` when not supplied.
    call
        Must not be ``'action'`` -- this is a provider function (``-f``).

    Returns a dict keyed by hosted-service name, each entry carrying the
    service properties plus a nested ``deployments`` map with role instances
    and per-role details.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_hosted_services function must be called with '
            '-f or --function')

    if not conn:
        conn = get_conn()

    ret = {}
    services = conn.list_hosted_services()
    for service in services:
        props = service.hosted_service_properties
        # Top-level service record; 'deployments' is filled in below.
        ret[service.service_name] = {
            'name': service.service_name,
            'url': service.url,
            'affinity_group': props.affinity_group,
            'date_created': props.date_created,
            'date_last_modified': props.date_last_modified,
            'description': props.description,
            'extended_properties': props.extended_properties,
            'label': props.label,
            'location': props.location,
            'status': props.status,
            'deployments': {},
        }
        # A second call with embed_detail=True is needed to obtain the
        # deployment/role details not present in the listing response.
        deployments = conn.get_hosted_service_properties(
            service_name=service.service_name, embed_detail=True)
        for deployment in deployments.deployments:
            ret[service.service_name]['deployments'][deployment.name] = {
                'configuration': deployment.configuration,
                'created_time': deployment.created_time,
                'deployment_slot': deployment.deployment_slot,
                'extended_properties': deployment.extended_properties,
                'input_endpoint_list': deployment.input_endpoint_list,
                'label': deployment.label,
                'last_modified_time': deployment.last_modified_time,
                'locked': deployment.locked,
                'name': deployment.name,
                'persistent_vm_downtime_info': deployment.persistent_vm_downtime_info,
                'private_id': deployment.private_id,
                'role_instance_list': {},
                'role_list': {},
                'rollback_allowed': deployment.rollback_allowed,
                'sdk_version': deployment.sdk_version,
                'status': deployment.status,
                'upgrade_domain_count': deployment.upgrade_domain_count,
                'upgrade_status': deployment.upgrade_status,
                'url': deployment.url,
            }
            # Runtime state of each role instance (VM) in the deployment.
            for role_instance in deployment.role_instance_list:
                ret[service.service_name]['deployments'][deployment.name][
                    'role_instance_list'][role_instance.role_name] = {
                    'fqdn': role_instance.fqdn,
                    'instance_error_code': role_instance.instance_error_code,
                    'instance_fault_domain': role_instance.instance_fault_domain,
                    'instance_name': role_instance.instance_name,
                    'instance_size': role_instance.instance_size,
                    'instance_state_details': role_instance.instance_state_details,
                    'instance_status': role_instance.instance_status,
                    'instance_upgrade_domain': role_instance.instance_upgrade_domain,
                    'ip_address': role_instance.ip_address,
                    'power_state': role_instance.power_state,
                    'role_name': role_instance.role_name,
                }
            # Static role definitions; get_role() adds configuration detail
            # (one extra API round-trip per role).
            for role in deployment.role_list:
                ret[service.service_name]['deployments'][
                    deployment.name]['role_list'][role.role_name] = {
                    'role_name': role.role_name,
                    'os_version': role.os_version,
                }
                role_info = conn.get_role(
                    service_name=service.service_name,
                    deployment_name=deployment.name,
                    role_name=role.role_name,
                )
                ret[service.service_name]['deployments'][deployment.name][
                    'role_list'][role.role_name]['role_info'] = {
                    'availability_set_name': role_info.availability_set_name,
                    'configuration_sets': role_info.configuration_sets,
                    'data_virtual_hard_disks': role_info.data_virtual_hard_disks,
                    'os_version': role_info.os_version,
                    'role_name': role_info.role_name,
                    'role_size': role_info.role_size,
                    'role_type': role_info.role_type,
                }
                ret[service.service_name]['deployments'][
                    deployment.name]['role_list'][role.role_name]['role_info'][
                    'os_virtual_hard_disk'] = {
                    'disk_label': role_info.os_virtual_hard_disk.disk_label,
                    'disk_name': role_info.os_virtual_hard_disk.disk_name,
                    'host_caching': role_info.os_virtual_hard_disk.host_caching,
                    'media_link': role_info.os_virtual_hard_disk.media_link,
                    'os': role_info.os_virtual_hard_disk.os,
                    'source_image_name': role_info.os_virtual_hard_disk.source_image_name,
                }
    return ret
def __init__(self, username, project_id, auth_url, region_name=None, password=None, os_auth_plugin=None, **kwargs): ''' Set up nova credentials ''' if not HAS_NOVA: return None self.kwargs = kwargs.copy() self.kwargs['username'] = username self.kwargs['project_id'] = project_id self.kwargs['auth_url'] = auth_url self.kwargs['region_name'] = region_name self.kwargs['service_type'] = 'compute' if not os_auth_plugin is None: novaclient.auth_plugin.discover_auth_systems() auth_plugin = novaclient.auth_plugin.load_plugin(os_auth_plugin) self.kwargs['auth_plugin'] = auth_plugin self.kwargs['auth_system'] = os_auth_plugin if not 'api_key' in self.kwargs.keys(): self.kwargs['api_key'] = password extensions = [] if 'extensions' in kwargs: exts = [] for key, item in self.kwargs['extensions'].items(): mod = __import__(item.replace('-', '_')) exts.append(novaclient.extension.Extension(key, mod)) self.kwargs['extensions'] = exts self.kwargs = sanatize_novaclient(self.kwargs) if not hasattr(client.Client, '__exit__'): raise SaltCloudSystemExit( "Newer version of novaclient required for __exit__.") with client.Client(**self.kwargs) as conn: try: conn.client.authenticate() except novaclient.exceptions.AmbiguousEndpoints: raise SaltCloudSystemExit( "Nova provider requires a 'region_name' to be specified") self.kwargs['auth_token'] = conn.client.auth_token self.catalog = \ conn.client.service_catalog.catalog['access']['serviceCatalog'] if not region_name is None: servers_endpoints = get_entry(self.catalog, 'type', 'compute')['endpoints'] self.kwargs['bypass_url'] = get_entry( servers_endpoints, 'region', region_name.upper())['publicURL'] self.compute_conn = client.Client(**self.kwargs) if not region_name is None: servers_endpoints = get_entry(self.catalog, 'type', 'volume')['endpoints'] self.kwargs['bypass_url'] = get_entry( servers_endpoints, 'region', region_name.upper())['publicURL'] self.kwargs['service_type'] = 'volume' self.volume_conn = client.Client(**self.kwargs)
def create(vm_):
    '''
    Create a single VM on AWS (libcloud EC2 driver) from a data dict.

    Returns a dict with the node details (and ``deploy_kwargs`` when a
    deployment was run), ``False`` on node-creation failure, or raises
    ``SaltCloudSystemExit``/``SaltCloudConfigError`` on fatal problems.
    '''
    # Validate the private key path before making any API calls.
    key_filename = config.get_cloud_config_value(
        'private_key', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename {0!r} does not exist'.format(
                key_filename
            )
        )
    location = get_location(vm_)
    log.info('Creating Cloud VM {0} in {1}'.format(vm_['name'], location))
    conn = get_conn(location=location)
    # Candidate SSH usernames to try against the booted instance.
    usernames = ssh_username(vm_)
    kwargs = {
        'ssh_key': config.get_cloud_config_value(
            'private_key', vm_, __opts__, search_global=False
        ),
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_),
        'location': get_availability_zone(conn, vm_)
    }
    ex_keyname = keyname(vm_)
    if ex_keyname:
        kwargs['ex_keyname'] = ex_keyname
    ex_securitygroup = securitygroup(vm_)
    if ex_securitygroup:
        kwargs['ex_securitygroup'] = ex_securitygroup
    ex_blockdevicemappings = block_device_mappings(vm_)
    if ex_blockdevicemappings:
        kwargs['ex_blockdevicemappings'] = ex_blockdevicemappings
    ex_iam_profile = iam_profile(vm_)
    if ex_iam_profile:
        # libcloud does not implement 'iam_profile' yet.
        # A pull request has been suggested
        # https://github.com/apache/libcloud/pull/150
        raise SaltCloudConfigError(
            'libcloud does not implement \'iam_profile\' yet. '
            'Use EC2 driver instead.'
        )
    tags = config.get_cloud_config_value('tag', vm_, __opts__, {}, search_global=False)
    if not isinstance(tags, dict):
        raise SaltCloudConfigError(
            '\'tag\' should be a dict.'
        )
    kwargs['ex_metadata'] = config.get_cloud_config_value('metadata', vm_, __opts__, default={}, search_global=False)
    if not isinstance(kwargs['ex_metadata'], dict):
        raise SaltCloudConfigError(
            '\'metadata\' should be a dict.'
        )
    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on AWS\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(
                vm_['name'], exc
            ),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG)
        )
        return False
    log.info('Created node {0}'.format(vm_['name']))

    def __get_node_data(conn, vm_name):
        '''
        Poll callback for wait_for_ip: return the node once the requested
        interface has an address, False on lookup failure, None to retry.
        '''
        data = get_node(conn, vm_name)
        if data is None:
            # Trigger a failure in the waiting function
            return False
        if ssh_interface(vm_) == 'private_ips' and data.private_ips:
            return data
        if ssh_interface(vm_) == 'public_ips' and data.public_ips:
            return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __get_node_data,
            update_args=(conn, vm_['name']),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=5 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=0.5),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(exc.message)

    if tags:
        set_tags(vm_['name'], tags, call='action')

    if ssh_interface(vm_) == 'private_ips':
        log.info('Salt node data. Private_ip: {0}'.format(data.private_ips[0]))
        ip_address = data.private_ips[0]
    else:
        log.info('Salt node data. Public_ip: {0}'.format(data.public_ips[0]))
        ip_address = data.public_ips[0]

    # Placeholder until a working username is found below.
    # NOTE(review): '******' looks like a redaction artifact; other versions
    # of this driver use a real default here -- confirm against history.
    username = '******'
    ssh_connect_timeout = config.get_cloud_config_value(
        'ssh_connect_timeout', vm_, __opts__, 900   # 15 minutes
    )
    # Try each candidate username until one authenticates; the for/else
    # raises only when every candidate fails.
    if salt.utils.cloud.wait_for_port(ip_address, timeout=ssh_connect_timeout):
        for user in usernames:
            if salt.utils.cloud.wait_for_passwd(
                    host=ip_address,
                    username=user,
                    ssh_timeout=config.get_cloud_config_value(
                        'wait_for_passwd_timeout', vm_, __opts__,
                        default=1 * 60),
                    key_filename=key_filename):
                username = user
                break
        else:
            raise SaltCloudSystemExit(
                'Failed to authenticate against remote ssh'
            )

    ret = {}
    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        deploy_kwargs = {
            'host': ip_address,
            'username': username,
            'key_filename': key_filename,
            'tmp_dir': config.get_cloud_config_value(
                'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'
            ),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'tty': config.get_cloud_config_value(
                'tty', vm_, __opts__, default=True
            ),
            'script': deploy_script.script,
            'name': vm_['name'],
            'sudo': config.get_cloud_config_value(
                'sudo', vm_, __opts__, default=(username != 'root')
            ),
            'sudo_password': config.get_cloud_config_value(
                'sudo_password', vm_, __opts__, default=None
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'conf_file': __opts__['conf_file'],
            'sock_dir': __opts__['sock_dir'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True
            ),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__
            ),
            'script_env': config.get_cloud_config_value('script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True
        )

        # Check for Windows install params
        win_installer = config.get_cloud_config_value('win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator'
            )
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default=''
            )

        # Store what was used to the deploy the VM
        # NOTE(review): unlike other drivers, secrets (minion_pem,
        # sudo_password, ...) are not stripped before storing -- confirm.
        ret['deploy_kwargs'] = deploy_kwargs

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)
        if deployed:
            log.info('Salt installed on {name}'.format(**vm_))
        else:
            log.error('Failed to start Salt on Cloud VM {name}'.format(**vm_))

    # NOTE(review): data is a libcloud Node object; dict.update(data) likely
    # needs data.__dict__ (as used in the debug log below) -- confirm.
    ret.update(data)

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug(
        '{0[name]!r} VM creation details:\n{1}'.format(
            vm_, pprint.pformat(data.__dict__)
        )
    )

    volumes = config.get_cloud_config_value(
        'volumes', vm_, __opts__, search_global=True
    )
    if volumes:
        log.info('Create and attach volumes to node {0}'.format(data.name))
        create_attach_volumes(volumes, location, data)

    return ret
def _salt(fun, *args, **kw):
    '''
    Execute a salt function on a specific minion.

    Special keyword arguments popped from ``kw`` (the rest is forwarded to
    the remote function):

    salt_target
        Minion target; falls back to the configured provider's target.
    salt_timeout
        Seconds to wait for the job to finish (per-function defaults in
        ``__FUN_TIMEOUT``).
    kwargs
        Extra keyword arguments for the underlying ``LocalClient`` calls.

    Results of functions listed in ``__CACHED_FUNS`` are memoized in
    ``__CACHED_CALLS``, keyed on a time bucket plus the serialized call.
    '''
    try:
        target = kw.pop('salt_target')
    except KeyError:
        target = None
    try:
        timeout = int(kw.pop('salt_timeout'))
    except (KeyError, ValueError):
        # try to has some low timeouts for very basic commands
        timeout = __FUN_TIMEOUT.get(
            fun,
            900  # wait up to 15 minutes for the default timeout
        )
    try:
        kwargs = kw.pop('kwargs')
    except KeyError:
        kwargs = {}
    if not target:
        infos = get_configured_provider()
        if not infos:
            # No provider configuration available; nothing to target.
            return
        target = infos['target']
    # 'laps' becomes a time bucket for cached functions so entries expire
    # after the per-function TTL in __CACHED_FUNS.
    laps = time.time()
    cache = False
    if fun in __CACHED_FUNS:
        cache = True
        laps = laps // __CACHED_FUNS[fun]
    # Serialize the call for use in the cache key; unserializable parts
    # degrade to an empty string rather than failing.
    try:
        sargs = json.dumps(args)
    except TypeError:
        sargs = ''
    try:
        skw = json.dumps(kw)
    except TypeError:
        skw = ''
    try:
        skwargs = json.dumps(kwargs)
    except TypeError:
        skwargs = ''
    cache_key = (laps, target, fun, sargs, skw, skwargs)
    if not cache or (cache and (not cache_key in __CACHED_CALLS)):
        conn = _client()
        runner = _runner()
        # Fire the job asynchronously, then poll saltutil.find_job until the
        # minion reports it finished (or the timeout elapses).
        jid = conn.cmd_async(tgt=target,
                             fun=fun,
                             arg=args,
                             kwarg=kw,
                             **kwargs)
        cret = conn.cmd(tgt=target,
                        fun='saltutil.find_job',
                        arg=[jid],
                        timeout=10,
                        **kwargs)
        running = bool(cret.get(target, False))
        itimeout = timeout
        while running:
            timeout -= 2
            cret = conn.cmd(tgt=target,
                            fun='saltutil.find_job',
                            arg=[jid],
                            timeout=10,
                            **kwargs)
            running = bool(cret.get(target, False))
            if not running:
                break
            if running and not timeout:
                raise Exception('Timeout {0} is elapsed'.format(itimeout))
            time.sleep(2)
        # timeout for the master to return data about a specific job
        wait_for_res = float({
            'test.ping': '5',
        }.get(fun, '120'))
        while wait_for_res:
            wait_for_res -= 0.5
            cret = runner.cmd(
                'jobs.lookup_jid',
                [jid, {'__kwarg__': True, 'output': False}])
            if target in cret:
                ret = cret[target]
                break
            # special case, some answers may be crafted
            # to handle the unresponsivness of a specific command
            # which is also meaningfull, eg a minion not yet provisionned
            if fun in ['test.ping'] and not wait_for_res:
                ret = {
                    'test.ping': False,
                }.get(fun, False)
            time.sleep(0.5)
        # NOTE(review): if the wait loop exhausts without a result for a
        # function other than test.ping, 'ret' is unbound here and the 'in'
        # test below raises NameError (not the caught TypeError) -- confirm.
        try:
            if 'is not available.' in ret:
                raise SaltCloudSystemExit(
                    'module/function {0} is not available'.format(fun))
        except SaltCloudSystemExit:
            raise
        except TypeError:
            pass
        if cache:
            __CACHED_CALLS[cache_key] = ret
    elif cache and cache_key in __CACHED_CALLS:
        ret = __CACHED_CALLS[cache_key]
    return ret
def create(vm_):
    '''
    Create a single VM from a data dict

    CLI Example:

    .. code-block:: bash

        salt-cloud -p proxmox-ubuntu vmhostname

    Returns a dict with creation data, IP and credentials (plus
    ``deploy_kwargs`` when Salt was deployed), ``False`` on node-creation
    failure, or an ``{'Error': ...}`` dict on provisioning timeouts.
    '''
    ret = {}
    # Proxmox deployment authenticates with a password, so sshpass must be
    # present when deploy is requested.
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    if deploy is True and salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'sshpass\' binary is not '
            'present on the system.')

    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport'])

    log.info('Creating Cloud VM {0}'.format(vm_['name']))

    try:
        data = create_node(vm_)
    except Exception as exc:
        log.error(
            'Error creating {0} on PROXMOX\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: \n{1}'.format(
                vm_['name'], exc.message),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    ret['creation_data'] = data
    name = vm_['name']          # hostname which we know
    vmid = data['vmid']         # vmid which we have received
    host = data['node']         # host which we have received
    nodeType = data['technology']   # VM tech (Qemu / OpenVZ)

    # Determine which IP to use in order of preference:
    if 'ip_address' in vm_:
        ip_address = str(vm_['ip_address'])
    elif 'public_ips' in data:
        ip_address = str(data['public_ips'][0])  # first IP
    elif 'private_ips' in data:
        ip_address = str(data['private_ips'][0])  # first IP
    else:
        raise SaltCloudExecutionFailure  # err.. not a good idea i reckon

    log.debug('Using IP address {0}'.format(ip_address))

    # wait until the vm has been created so we can start it
    if not wait_for_created(data['upid'], timeout=300):
        return {'Error': 'Unable to create {0}, command timed out'.format(name)}

    # VM has been created. Starting..
    if not start(name, vmid, call='action'):
        log.error('Node {0} ({1}) failed to start!'.format(name, vmid))
        raise SaltCloudExecutionFailure

    # Wait until the VM has fully started
    log.debug('Waiting for state "running" for vm {0} on {1}'.format(
        vmid, host))
    if not wait_for_state(vmid, host, nodeType, 'running'):
        return {'Error': 'Unable to start {0}, command timed out'.format(name)}

    ssh_username = config.get_cloud_config_value('ssh_username', vm_, __opts__, default='root')
    ssh_password = config.get_cloud_config_value(
        'password', vm_, __opts__,
    )

    ret['ip_address'] = ip_address
    ret['username'] = ssh_username
    ret['password'] = ssh_password

    # Check whether we need to deploy and are on OpenVZ rather than KVM which
    # does not (yet) provide support for automatic provisioning
    if config.get_cloud_config_value(
            'deploy', vm_, __opts__) is True and config.get_cloud_config_value(
            'technology', vm_, __opts__) == 'openvz':
        deploy_script = script(vm_)
        deploy_kwargs = {
            'opts': __opts__,
            'host': ip_address,
            'username': ssh_username,
            'password': ssh_password,
            # NOTE(review): other drivers pass deploy_script.script here;
            # confirm deploy_script (the object) is what deploy_script()
            # consumers expect on this driver.
            'script': deploy_script,
            'name': vm_['name'],
            'tmp_dir': config.get_cloud_config_value('tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'sudo': config.get_cloud_config_value('sudo', vm_, __opts__, default=(ssh_username != 'root')),
            'sudo_password': config.get_cloud_config_value('sudo_password', vm_, __opts__, default=None),
            'tty': config.get_cloud_config_value('tty', vm_, __opts__, default=False),
            'display_ssh_output': config.get_cloud_config_value('display_ssh_output', vm_, __opts__, default=True),
            'script_args': config.get_cloud_config_value('script_args', vm_, __opts__),
            'script_env': config.get_cloud_config_value('script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        win_installer = config.get_cloud_config_value('win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator')
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default='')

        # Store what was used to the deploy the VM: event_kwargs is a copy
        # with secrets removed, used for the event bus payload.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']

        # Store what was used to the deploy the VM
        # NOTE(review): this stores the unscrubbed deploy_kwargs (keys and
        # passwords included) in the return; other drivers store the
        # scrubbed event_kwargs here -- confirm which is intended.
        ret['deploy_kwargs'] = deploy_kwargs

        salt.utils.cloud.fire_event(
            'event',
            'executing deploy script',
            'salt/cloud/{0}/deploying'.format(vm_['name']),
            {'kwargs': event_kwargs},
            transport=__opts__['transport'])

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error('Failed to start Salt on Cloud VM {0}'.format(
                vm_['name']))

    # END Install Salt role(s)

    # Report success!
    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    return ret
def create(vm_):
    '''
    Create a single SoftLayer VM from a data dict.

    ``vm_`` carries the profile/provider settings: name, domain, cpu_number,
    ram, local_disk, hourly_billing, plus either an ``image`` OS reference
    code or a ``global_identifier`` image template.

    Returns the API response (merged with the deploy kwargs used) on
    success, or ``False`` if the initial createObject call fails.
    '''
    # Announce the creation on the salt event bus.
    salt.utils.cloud.fire_event('event', 'starting create',
                                'salt/cloud/{0}/creating'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['provider'],
                                },
                                transport=__opts__['transport'])

    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    conn = get_conn()

    # Base arguments for the SoftLayer createObject call.
    kwargs = {
        'hostname': vm_['name'],
        'domain': vm_['domain'],
        'startCpus': vm_['cpu_number'],
        'maxMemory': vm_['ram'],
        'localDiskFlag': vm_['local_disk'],
        'hourlyBillingFlag': vm_['hourly_billing'],
    }

    if 'image' in vm_:
        # Install from an OS reference code with a single boot disk.
        kwargs['operatingSystemReferenceCode'] = vm_['image']
        kwargs['blockDevices'] = [{
            'device': '0',
            'diskImage': {
                'capacity': vm_['disk_size']
            },
        }]
    elif 'global_identifier' in vm_:
        # Provision from a pre-built image template instead.
        kwargs['blockDeviceTemplateGroup'] = {
            'globalIdentifier': vm_['global_identifier']
        }

    location = get_location(vm_)
    if location:
        kwargs['datacenter'] = {'name': location}

    private_vlan = config.get_cloud_config_value(
        'private_vlan', vm_, __opts__, default=False)
    if private_vlan:
        kwargs['primaryBackendNetworkComponent'] = {
            'networkVlan': {
                'id': private_vlan,
            }
        }

    private_network = config.get_cloud_config_value(
        'private_network', vm_, __opts__, default=False)
    if bool(private_network) is True:
        # NOTE(review): passed as the string 'True', not a bool — presumably
        # what the API expects; confirm before normalizing.
        kwargs['privateNetworkOnlyFlag'] = 'True'

    public_vlan = config.get_cloud_config_value(
        'public_vlan', vm_, __opts__, default=False)
    if public_vlan:
        kwargs['primaryNetworkComponent'] = {
            'networkVlan': {
                'id': public_vlan,
            }
        }

    max_net_speed = config.get_cloud_config_value(
        'max_net_speed', vm_, __opts__, default=10)
    if max_net_speed:
        kwargs['networkComponents'] = [{'maxSpeed': int(max_net_speed)}]

    salt.utils.cloud.fire_event('event', 'requesting instance',
                                'salt/cloud/{0}/requesting'.format(
                                    vm_['name']),
                                {'kwargs': kwargs},
                                transport=__opts__['transport'])

    try:
        response = conn.createObject(kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on SoftLayer\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n{1}'.format(vm_['name'], str(exc)),
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG)
        return False

    # Decide which address field to wait on: backend (private) address when
    # private ssh / private windows access is configured.
    ip_type = 'primaryIpAddress'
    private_ssh = config.get_cloud_config_value(
        'private_ssh', vm_, __opts__, default=False)
    private_wds = config.get_cloud_config_value(
        'private_windows', vm_, __opts__, default=False)
    if private_ssh or private_wds:
        ip_type = 'primaryBackendIpAddress'

    def wait_for_ip():
        '''
        Wait for the IP address to become available
        '''
        nodes = list_nodes_full()
        if ip_type in nodes[vm_['name']]:
            return nodes[vm_['name']][ip_type]
        time.sleep(1)
        # Falsey return tells wait_for_fun to keep polling.
        return False

    ip_address = salt.utils.cloud.wait_for_fun(
        wait_for_ip,
        timeout=config.get_cloud_config_value(
            'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
    )

    # Nothing more to do when deployment is disabled.
    if config.get_cloud_config_value('deploy', vm_, __opts__) is not True:
        return show_instance(vm_['name'], call='action')

    # Wait on SMB for Windows targets, ssh otherwise.
    SSH_PORT = 22
    WINDOWS_DS_PORT = 445
    managing_port = SSH_PORT
    if config.get_cloud_config_value('windows', vm_, __opts__) or \
            config.get_cloud_config_value('win_installer', vm_, __opts__):
        managing_port = WINDOWS_DS_PORT

    ssh_connect_timeout = config.get_cloud_config_value(
        'ssh_connect_timeout', vm_, __opts__, 15 * 60)
    connect_timeout = config.get_cloud_config_value(
        'connect_timeout', vm_, __opts__, ssh_connect_timeout)
    if not salt.utils.cloud.wait_for_port(ip_address,
                                          port=managing_port,
                                          timeout=connect_timeout):
        raise SaltCloudSystemExit('Failed to authenticate against remote ssh')

    # Poll the account service until the OS root credentials are published.
    pass_conn = get_conn(service='SoftLayer_Account')
    mask = {
        'virtualGuests': {
            'powerState': '',
            'operatingSystem': {
                'passwords': ''
            },
        },
    }

    def get_credentials():
        '''
        Wait for the password to become available
        '''
        node_info = pass_conn.getVirtualGuests(id=response['id'], mask=mask)
        for node in node_info:
            if node['id'] == response['id']:
                if 'passwords' in node['operatingSystem'] and len(
                        node['operatingSystem']['passwords']) > 0:
                    # (username, password) tuple for the first credential set.
                    return node['operatingSystem']['passwords'][0][
                        'username'], node['operatingSystem']['passwords'][0][
                            'password']
        time.sleep(5)
        return False

    username, passwd = salt.utils.cloud.wait_for_fun(  # pylint: disable=W0633
        get_credentials,
        timeout=config.get_cloud_config_value(
            'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
    )
    response['username'] = username
    response['password'] = passwd
    response['public_ip'] = ip_address

    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default=username)

    ret = {}
    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        deploy_kwargs = {
            'opts': __opts__,
            'host': ip_address,
            'username': ssh_username,
            'password': passwd,
            'script': deploy_script.script,
            'name': vm_['name'],
            'tmp_dir': config.get_cloud_config_value(
                'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'sudo': config.get_cloud_config_value(
                'sudo', vm_, __opts__, default=(ssh_username != 'root')),
            'sudo_password': config.get_cloud_config_value(
                'sudo_password', vm_, __opts__, default=None),
            'tty': config.get_cloud_config_value(
                'tty', vm_, __opts__, default=False),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__),
            'script_env': config.get_cloud_config_value(
                'script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Check for Windows install params
        win_installer = config.get_cloud_config_value(
            'win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default=username)
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default=passwd)

        # Store what was used to the deploy the VM, with credentials
        # stripped before the kwargs are published on the event bus.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event('event', 'executing deploy script',
                                    'salt/cloud/{0}/deploying'.format(
                                        vm_['name']),
                                    {'kwargs': event_kwargs},
                                    transport=__opts__['transport'])

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error('Failed to start Salt on Cloud VM {0}'.format(
                vm_['name']))

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(response)))

    ret.update(response)

    salt.utils.cloud.fire_event('event', 'created instance',
                                'salt/cloud/{0}/created'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['provider'],
                                },
                                transport=__opts__['transport'])

    return ret
def destroy(name, conn=None, call=None):
    '''
    Delete a single VM.

    name -- name of the VM to destroy.
    conn -- optional existing provider connection; one is created when
        not supplied.
    call -- must not be 'function'; this action requires -d/--destroy or
        -a/--action.

    Returns True when the provider reports the deletion succeeded,
    False otherwise (including when the VM cannot be found).
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.'
        )

    salt.utils.cloud.fire_event(
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        {'name': name},
        transport=__opts__['transport']
    )

    if not conn:
        conn = get_conn()   # pylint: disable=E0602

    node = conn.server_by_name(name)
    profiles = get_configured_provider()['profiles']  # pylint: disable=E0602

    if node is None:
        log.error('Unable to find the VM {0}'.format(name))
        # BUG FIX: previously execution fell through and dereferenced the
        # missing node below (node.extra / node.id), raising an exception.
        return False

    profile = None
    if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
        profile = node.extra['metadata']['profile']

    # Honor the profile's flush_mine_on_destroy flag, if present.
    flush_mine_on_destroy = False
    if profile is not None and profile in profiles:
        if 'flush_mine_on_destroy' in profiles[profile]:
            flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']

    if flush_mine_on_destroy:
        log.info('Clearing Salt Mine: {0}'.format(name))
        salt_client = salt.client.get_local_client(__opts__['conf_file'])
        # Fire-and-forget: the command's return value is not needed.
        salt_client.cmd(name, 'mine.flush')

    log.info('Clearing Salt Mine: {0}, {1}'.format(
        name, flush_mine_on_destroy
    ))
    log.info('Destroying VM: {0}'.format(name))
    ret = conn.delete(node.id)
    if ret:
        log.info('Destroyed VM: {0}'.format(name))
        # Fire destroy action
        salt.utils.cloud.fire_event(
            'event',
            'destroyed instance',
            'salt/cloud/{0}/destroyed'.format(name),
            {'name': name},
            transport=__opts__['transport']
        )
        if __opts__.get('delete_sshkeys', False) is True:
            # Drop the host key for whichever interface we ssh'd through.
            salt.utils.cloud.remove_sshkey(getattr(
                node, __opts__.get('ssh_interface', 'public_ips'))[0])
        if __opts__.get('update_cachedir', False) is True:
            salt.utils.cloud.delete_minion_cachedir(
                name, __active_provider_name__.split(':')[0], __opts__)
        return True

    log.error('Failed to Destroy VM: {0}'.format(name))
    return False
def deploy_script(host, port=22, timeout=900, username='******',
                  password=None, key_filename=None, script=None,
                  deploy_command='/tmp/deploy.sh', sudo=False, tty=None,
                  name=None, pub_key=None, sock_dir=None, provider=None,
                  conf_file=None, start_action=None, make_master=False,
                  master_pub=None, master_pem=None, master_conf=None,
                  minion_pub=None, minion_pem=None, minion_conf=None,
                  keep_tmp=False, script_args=None, script_env=None,
                  ssh_timeout=15, make_syndic=False, make_minion=True,
                  display_ssh_output=True, preseed_minion_keys=None,
                  parallel=False, **kwargs):
    '''
    Copy a deploy script to a remote server, execute it, and remove it.

    The full sequence is: wait for ssh, upload the minion/master keys and
    configuration, upload and run the deploy script (optionally wrapped in
    an environment-setting shell wrapper), clean up the uploaded files
    unless ``keep_tmp`` is set, optionally wait for the minion to check in
    and run ``start_action``, then fire a deploy event.

    Returns True when the deployment completed, False if the ssh port or
    login never became available.

    NOTE(review): the '******' default for ``username`` looks like a
    redaction artifact rather than a usable default — confirm.
    '''
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename {0!r} does not exist'.format(
                key_filename))
    starttime = time.mktime(time.localtime())
    log.debug('Deploying {0} at {1}'.format(host, starttime))
    if wait_for_port(host=host, port=port):
        log.debug('SSH port {0} on {1} is available'.format(port, host))
        newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
        if wait_for_passwd(host, port=port, username=username,
                           password=password, key_filename=key_filename,
                           ssh_timeout=ssh_timeout,
                           display_ssh_output=display_ssh_output):
            log.debug('Logging into {0}:{1} as {2}'.format(
                host, port, username))
            newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
            # NOTE: this rebinds the **kwargs parameter; from here on,
            # ``kwargs`` is the ssh connection spec passed to scp_file()
            # and root_cmd().
            kwargs = {
                'hostname': host,
                'port': port,
                'username': username,
                'timeout': ssh_timeout,
                'display_ssh_output': display_ssh_output
            }
            if key_filename:
                log.debug('Using {0} as the key_filename'.format(
                    key_filename))
                kwargs['key_filename'] = key_filename
            elif password:
                log.debug('Using {0} as the password'.format(password))
                kwargs['password'] = password

            #FIXME: this try-except doesn't make sense! Something is missing...
            # (The try body only logs; nothing here can raise.)
            try:
                log.debug('SSH connection to {0} successful'.format(host))
            except Exception as exc:
                log.error(
                    'There was an error in deploy_script: {0}'.format(exc))

            if provider == 'ibmsce':
                # IBM SCE images ship with sftp disabled; re-enable the
                # sshd Subsystem line so scp_file() can work.
                subsys_command = ('sed -i "s/#Subsystem/Subsystem/" '
                                  '/etc/ssh/sshd_config')
                root_cmd(subsys_command, tty, sudo, **kwargs)
                root_cmd('service sshd restart', tty, sudo, **kwargs)

            # Minion configuration
            if minion_pem:
                scp_file('/tmp/minion.pem', minion_pem, kwargs)
                root_cmd('chmod 600 /tmp/minion.pem', tty, sudo, **kwargs)

            if minion_pub:
                scp_file('/tmp/minion.pub', minion_pub, kwargs)

            if minion_conf:
                if not isinstance(minion_conf, dict):
                    # Let's not just fail regarding this change, specially
                    # since we can handle it
                    raise DeprecationWarning(
                        '`salt.cloud.utils.deploy_script now only accepts '
                        'dictionaries for it\'s `minion_conf` parameter. '
                        'Loading YAML...')
                minion_grains = minion_conf.pop('grains', {})
                if minion_grains:
                    scp_file(
                        '/tmp/grains',
                        salt_config_to_yaml(minion_grains),
                        kwargs)
                scp_file('/tmp/minion',
                         salt_config_to_yaml(minion_conf),
                         kwargs)

            # Master configuration
            if master_pem:
                scp_file('/tmp/master.pem', master_pem, kwargs)
                root_cmd('chmod 600 /tmp/master.pem', tty, sudo, **kwargs)

            if master_pub:
                scp_file('/tmp/master.pub', master_pub, kwargs)

            if master_conf:
                if not isinstance(master_conf, dict):
                    # Let's not just fail regarding this change, specially
                    # since we can handle it
                    raise DeprecationWarning(
                        '`salt.cloud.utils.deploy_script now only accepts '
                        'dictionaries for it\'s `master_conf` parameter. '
                        'Loading from YAML ...')
                scp_file('/tmp/master',
                         salt_config_to_yaml(master_conf),
                         kwargs)

            # XXX: We need to make these paths configurable
            preseed_minion_keys_tempdir = '/tmp/preseed-minion-keys'
            if preseed_minion_keys is not None:
                # Create remote temp dir
                root_cmd('mkdir "{0}"'.format(preseed_minion_keys_tempdir),
                         tty, sudo, **kwargs)
                root_cmd('chmod 700 "{0}"'.format(
                    preseed_minion_keys_tempdir), tty, sudo, **kwargs)
                if kwargs['username'] != 'root':
                    root_cmd(
                        'chown {0} "{1}"'.format(
                            kwargs['username'], preseed_minion_keys_tempdir),
                        tty, sudo, **kwargs)

                # Copy pre-seed minion keys
                for minion_id, minion_key in preseed_minion_keys.iteritems():
                    rpath = os.path.join(preseed_minion_keys_tempdir,
                                         minion_id)
                    scp_file(rpath, minion_key, kwargs)

                if kwargs['username'] != 'root':
                    # Hand the keys back to root before the deploy script
                    # (running as root) consumes them.
                    root_cmd(
                        'chown -R root "{0}"'.format(
                            preseed_minion_keys_tempdir),
                        tty, sudo, **kwargs)

            # The actual deploy script
            if script:
                scp_file('/tmp/deploy.sh', script, kwargs)
                root_cmd('chmod +x /tmp/deploy.sh', tty, sudo, **kwargs)

            newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
            queue = None
            process = None
            # Consider this code experimental. It causes Salt Cloud to wait
            # for the minion to check in, and then fire a startup event.
            # Disabled if parallel because it doesn't work!
            if start_action and not parallel:
                queue = multiprocessing.Queue()
                process = multiprocessing.Process(
                    target=check_auth, kwargs=dict(
                        name=name, pub_key=pub_key, sock_dir=sock_dir,
                        timeout=newtimeout, queue=queue))
                log.debug('Starting new process to wait for salt-minion')
                process.start()

            # Run the deploy script
            if script:
                if 'bootstrap-salt' in script:
                    # Translate the relevant parameters into
                    # salt-bootstrap command-line flags.
                    deploy_command += ' -c /tmp/'
                    if make_syndic is True:
                        deploy_command += ' -S'
                    if make_master is True:
                        deploy_command += ' -M'
                    if make_minion is False:
                        deploy_command += ' -N'
                    if keep_tmp is True:
                        deploy_command += ' -K'
                    if preseed_minion_keys is not None:
                        deploy_command += ' -k {0}'.format(
                            preseed_minion_keys_tempdir)
                if script_args:
                    deploy_command += ' {0}'.format(script_args)

                if script_env:
                    if not isinstance(script_env, dict):
                        raise SaltCloudSystemExit(
                            'The \'script_env\' configuration setting NEEDS '
                            'to be a dictionary not a {0}'.format(
                                type(script_env)))
                    environ_script_contents = ['#!/bin/sh']
                    for key, value in script_env.iteritems():
                        # Works on both csh (setenv) and POSIX (export)
                        # remote shells.
                        environ_script_contents.append(
                            'setenv {0} \'{1}\' >/dev/null 2>&1 || '
                            'export {0}=\'{1}\''.format(key, value))
                    environ_script_contents.append(deploy_command)

                    # Upload our environ setter wrapper
                    scp_file('/tmp/environ-deploy-wrapper.sh',
                             '\n'.join(environ_script_contents),
                             kwargs)
                    root_cmd('chmod +x /tmp/environ-deploy-wrapper.sh',
                             tty, sudo, **kwargs)
                    # The deploy command is now our wrapper
                    deploy_command = '/tmp/environ-deploy-wrapper.sh'

                if root_cmd(deploy_command, tty, sudo, **kwargs) != 0:
                    raise SaltCloudSystemExit(
                        'Executing the command {0!r} failed'.format(
                            deploy_command))
                log.debug('Executed command {0!r}'.format(deploy_command))

                # Remove the deploy script
                if not keep_tmp:
                    root_cmd('rm -f /tmp/deploy.sh', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/deploy.sh')
                    if script_env:
                        root_cmd('rm -f /tmp/environ-deploy-wrapper.sh',
                                 tty, sudo, **kwargs)
                        log.debug('Removed /tmp/environ-deploy-wrapper.sh')

            if keep_tmp:
                log.debug('Not removing deployment files from /tmp/')

            # Remove minion configuration
            if not keep_tmp:
                if minion_pub:
                    root_cmd('rm -f /tmp/minion.pub', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/minion.pub')
                if minion_pem:
                    root_cmd('rm -f /tmp/minion.pem', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/minion.pem')
                if minion_conf:
                    root_cmd('rm -f /tmp/grains', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/grains')
                    root_cmd('rm -f /tmp/minion', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/minion')

                # Remove master configuration
                if master_pub:
                    root_cmd('rm -f /tmp/master.pub', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/master.pub')
                if master_pem:
                    root_cmd('rm -f /tmp/master.pem', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/master.pem')
                if master_conf:
                    root_cmd('rm -f /tmp/master', tty, sudo, **kwargs)
                    log.debug('Removed /tmp/master')

                # Remove pre-seed keys directory
                if preseed_minion_keys is not None:
                    root_cmd('rm -rf {0}'.format(
                        preseed_minion_keys_tempdir), tty, sudo, **kwargs)
                    log.debug(
                        'Removed {0}'.format(preseed_minion_keys_tempdir))

            if start_action and not parallel:
                # Block until the check_auth subprocess reports the minion
                # key was accepted, then run the start action over ssh.
                queuereturn = queue.get()
                process.join()
                if queuereturn and start_action:
                    #client = salt.client.LocalClient(conf_file)
                    #output = client.cmd_iter(
                    #    host, 'state.highstate', timeout=timeout
                    #)
                    #for line in output:
                    #    print(line)
                    log.info('Executing {0} on the salt-minion'.format(
                        start_action))
                    root_cmd('salt-call {0}'.format(start_action),
                             tty, sudo, **kwargs)
                    log.info(
                        'Finished executing {0} on the salt-minion'.format(
                            start_action))
            # Fire deploy action
            fire_event(
                'event',
                '{0} has been deployed at {1}'.format(name, host),
                'salt/cloud/{0}/deploy_script'.format(name),
            )
            return True
    return False
def create(vm_):
    '''
    Create a single IBM SCE VM from a data dict.

    Requires either an ``ssh_key_file`` or the ``sshpass`` binary for
    password-based deployment. Returns the node's attribute dict (merged
    with the deploy kwargs used) on success, or ``False`` when the
    libcloud create_node call fails.
    '''
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file {0!r} does not exist'.format(
                key_filename))

    if deploy is True and key_filename is None and \
            salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'ssh_key_file\' setting '
            'is not set and \'sshpass\' binary is not present on the '
            'system for the password.')

    # Announce the creation on the salt event bus.
    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    conn = get_conn()

    vm_['location'] = config.get_cloud_config_value('location', vm_, __opts__)

    # libcloud create_node arguments; auth is the named ssh key to inject.
    kwargs = {
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_),
        'location': get_location(conn, vm_),
        'auth': NodeAuthSSHKey(
            config.get_cloud_config_value('ssh_key_name', vm_, __opts__))
    }

    log.debug('Creating instance on {0} at {1}'.format(
        time.strftime('%Y-%m-%d'), time.strftime('%H:%M:%S')))

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {
            'kwargs': {
                'name': kwargs['name'],
                'image': kwargs['image'].name,
                'size': kwargs['size'].name,
                'location': kwargs['location'].name
            }
        },
    )

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on IBMSCE\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n{1}'.format(vm_['name'], exc),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    def __query_node_data(vm_name, data):
        # Poll helper for wait_for_ip: returns the node data once the
        # address for the configured ssh_interface shows up; an implicit
        # None return means "keep waiting".
        nodelist = list_nodes()
        public_ips = nodelist[vm_name]['public_ips']
        private_ips = nodelist[vm_name]['private_ips']
        if private_ips:
            data.private_ips = private_ips
        if public_ips:
            data.public_ips = public_ips
        if ssh_interface(vm_) == 'private_ips' and private_ips:
            return data
        if ssh_interface(vm_) == 'public_ips' and public_ips:
            return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_['name'], data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=25 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=15),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(exc.message)

    ret = {}
    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        log.debug('Deploying {0} using IP address {1}'.format(
            vm_['name'], data.public_ips[0]))
        deploy_kwargs = {
            'host': data.public_ips[0],
            # NOTE(review): '******' looks like a redaction artifact rather
            # than a real username — confirm the intended value.
            'username': '******',
            'provider': 'ibmsce',
            'password': data.extra['password'],
            'key_filename': key_filename,
            'script': deploy_script.script,
            'name': vm_['name'],
            'sudo': True,
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__),
            'script_env': config.get_cloud_config_value(
                'script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Check for Windows install params
        win_installer = config.get_cloud_config_value(
            'win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator')
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default='')

        # Store what was used to the deploy the VM, with credentials
        # stripped before the kwargs are published on the event bus.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event(
            'event',
            'executing deploy script',
            'salt/cloud/{0}/deploying'.format(vm_['name']),
            {'kwargs': event_kwargs},
        )

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error('Failed to start Salt on Cloud VM {0}'.format(
                vm_['name']))

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data.__dict__)))

    ret.update(data.__dict__)

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    return ret
def get_conn():
    '''
    Return a libcloud OpenStack driver instance built from the active
    provider configuration.

    Authentication is selected by configuration:

    - when a ``password`` is set, ``2.0_password`` auth is used; the
      literal value ``USE_KEYRING`` triggers a lookup in the system
      keyring (driver-specific credential id first, falling back to the
      provider-level id);
    - otherwise the ``apikey`` setting is used with ``2.0_apikey``.

    Raises SaltCloudSystemExit when ``USE_KEYRING`` is configured but no
    password can be retrieved from the keyring.
    '''
    vm_ = get_configured_provider()
    driver = get_driver(Provider.OPENSTACK)
    authinfo = {
        'ex_force_auth_url': config.get_cloud_config_value(
            'identity_url', vm_, __opts__, search_global=False
        ),
        'ex_force_service_name': config.get_cloud_config_value(
            'compute_name', vm_, __opts__, search_global=False
        ),
        'ex_force_service_region': config.get_cloud_config_value(
            'compute_region', vm_, __opts__, search_global=False
        ),
        'ex_tenant_name': config.get_cloud_config_value(
            'tenant', vm_, __opts__, search_global=False
        ),
    }

    service_type = config.get_cloud_config_value(
        'service_type', vm_, __opts__, search_global=False)
    if service_type:
        authinfo['ex_force_service_type'] = service_type

    base_url = config.get_cloud_config_value(
        'base_url', vm_, __opts__, search_global=False)
    if base_url:
        authinfo['ex_force_base_url'] = base_url

    insecure = config.get_cloud_config_value(
        'insecure', vm_, __opts__, search_global=False
    )
    if insecure:
        # Disable SSL certificate verification globally for libcloud.
        import libcloud.security
        libcloud.security.VERIFY_SSL_CERT = False

    user = config.get_cloud_config_value(
        'user', vm_, __opts__, search_global=False
    )
    password = config.get_cloud_config_value(
        'password', vm_, __opts__, search_global=False
    )

    if password is not None:
        authinfo['ex_force_auth_version'] = '2.0_password'
        log.debug('OpenStack authenticating using password')
        if password == 'USE_KEYRING':
            # retrieve password from system keyring
            credential_id = "salt.cloud.provider.{0}".format(
                __active_provider_name__)
            logging.debug("Retrieving keyring password for {0} ({1})".format(
                credential_id, user)
            )
            # attempt to retrieve driver specific password first
            driver_password = salt.utils.cloud.retrieve_password_from_keyring(
                credential_id, user
            )
            if driver_password is None:
                provider_password = \
                    salt.utils.cloud.retrieve_password_from_keyring(
                        credential_id.split(':')[0],  # fallback to provider level
                        user)
                if provider_password is None:
                    raise SaltCloudSystemExit(
                        "Unable to retrieve password from keyring "
                        "for provider {0}".format(
                            __active_provider_name__
                        )
                    )
                else:
                    actual_password = provider_password
            else:
                # BUG FIX: a successful driver-specific keyring lookup
                # previously left `actual_password` unassigned, raising
                # UnboundLocalError at the driver() call below.
                actual_password = driver_password
        else:
            actual_password = password
        return driver(
            user,
            actual_password,
            **authinfo
        )

    # No password configured: fall back to API-key authentication.
    authinfo['ex_force_auth_version'] = '2.0_apikey'
    log.debug('OpenStack authenticating using apikey')
    return driver(
        user,
        config.get_cloud_config_value('apikey', vm_, __opts__,
                                      search_global=False),
        **authinfo)
def create(vm_):
    '''
    Provision salt onto a single existing machine (saltify-style).

    No VM is created by a cloud API here: the target is an already-running
    host reachable at ``vm_['ssh_host']``. Returns a dict containing the
    deploy kwargs used (and ``deployed``) on success, or an ``Error`` dict
    when deployment is disabled or fails.
    '''
    # Bail out early when deployment is explicitly disabled.
    if config.get_cloud_config_value('deploy', vm_, __opts__) is False:
        return {
            'Error': {
                'No Deploy': '\'deploy\' is not enabled. Not deploying.'
            }
        }

    key_filename = config.get_cloud_config_value(
        'key_filename', vm_, __opts__, search_global=False, default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_keyfile {0!r} does not exist'.format(
                key_filename))

    if key_filename is None and salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'ssh_keyfile\' setting '
            'is not set and \'sshpass\' binary is not present on the '
            'system for the password.')

    ret = {}

    log.info('Provisioning existing machine {0}'.format(vm_['name']))

    ssh_username = config.get_cloud_config_value('ssh_username', vm_, __opts__)
    deploy_script = script(vm_)
    deploy_kwargs = {
        'opts': __opts__,
        'host': vm_['ssh_host'],
        'username': ssh_username,
        # NOTE(review): sibling drivers in this file pass
        # deploy_script.script here — confirm whether the bare object is
        # intentional for this driver.
        'script': deploy_script,
        'name': vm_['name'],
        'tmp_dir': config.get_cloud_config_value(
            'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
        'deploy_command': config.get_cloud_config_value(
            'deploy_command', vm_, __opts__,
            default='/tmp/.saltcloud/deploy.sh',
        ),
        'start_action': __opts__['start_action'],
        'parallel': __opts__['parallel'],
        'sock_dir': __opts__['sock_dir'],
        'conf_file': __opts__['conf_file'],
        'minion_pem': vm_['priv_key'],
        'minion_pub': vm_['pub_key'],
        'keep_tmp': __opts__['keep_tmp'],
        'sudo': config.get_cloud_config_value(
            'sudo', vm_, __opts__, default=(ssh_username != 'root')),
        'sudo_password': config.get_cloud_config_value(
            'sudo_password', vm_, __opts__, default=None),
        'tty': config.get_cloud_config_value(
            'tty', vm_, __opts__, default=True),
        'password': config.get_cloud_config_value(
            'password', vm_, __opts__, search_global=False),
        'key_filename': key_filename,
        'script_args': config.get_cloud_config_value(
            'script_args', vm_, __opts__),
        'script_env': config.get_cloud_config_value(
            'script_env', vm_, __opts__),
        'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_),
        'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
        'display_ssh_output': config.get_cloud_config_value(
            'display_ssh_output', vm_, __opts__, default=True)
    }
    if 'ssh_port' in vm_:
        deploy_kwargs.update({'port': vm_['ssh_port']})

    if 'salt_host' in vm_:
        deploy_kwargs.update({'salt_host': vm_['salt_host']})

    # forward any info about possible ssh gateway to deploy script
    # as some providers need also a 'gateway' configuration
    if 'gateway' in vm_:
        deploy_kwargs.update({'gateway': vm_['gateway']})

    # Deploy salt-master files, if necessary
    if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
        deploy_kwargs['make_master'] = True
        deploy_kwargs['master_pub'] = vm_['master_pub']
        deploy_kwargs['master_pem'] = vm_['master_pem']
        master_conf = salt.utils.cloud.master_config(__opts__, vm_)
        deploy_kwargs['master_conf'] = master_conf

        if master_conf.get('syndic_master', None):
            deploy_kwargs['make_syndic'] = True

    deploy_kwargs['make_minion'] = config.get_cloud_config_value(
        'make_minion', vm_, __opts__, default=True)

    # Windows targets get the installer plus Windows-specific credentials.
    win_installer = config.get_cloud_config_value(
        'win_installer', vm_, __opts__)
    if win_installer:
        deploy_kwargs['win_installer'] = win_installer
        minion = salt.utils.cloud.minion_config(__opts__, vm_)
        deploy_kwargs['master'] = minion['master']
        deploy_kwargs['username'] = config.get_cloud_config_value(
            'win_username', vm_, __opts__, default='Administrator')
        deploy_kwargs['password'] = config.get_cloud_config_value(
            'win_password', vm_, __opts__, default='')

    # Store what was used to the deploy the VM, with credentials stripped
    # before the kwargs are published on the event bus.
    event_kwargs = copy.deepcopy(deploy_kwargs)
    del event_kwargs['minion_pem']
    del event_kwargs['minion_pub']
    del event_kwargs['sudo_password']
    if 'password' in event_kwargs:
        del event_kwargs['password']
    ret['deploy_kwargs'] = event_kwargs

    salt.utils.cloud.fire_event('event', 'executing deploy script',
                                'salt/cloud/{0}/deploying'.format(
                                    vm_['name']),
                                {'kwargs': event_kwargs},
                                transport=__opts__['transport'])

    deployed = False
    if win_installer:
        deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
    else:
        deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

    if deployed:
        ret['deployed'] = deployed
        log.info('Salt installed on {0}'.format(vm_['name']))
        return ret

    log.error('Failed to start Salt on host {0}'.format(vm_['name']))
    return {
        'Error': {
            'Not Deployed': 'Failed to start Salt on host {0}'.format(
                vm_['name'])
        }
    }
def create(vm_):
    '''
    Create a single Joyent VM from a data dict.

    vm_
        Profile/VM definition dict.  Keys read here include ``name``,
        ``profile``, ``provider``, ``image``, ``size``, ``location``,
        ``priv_key``/``pub_key`` and the deploy-related settings resolved
        through ``config.get_cloud_config_value``.

    Returns a dict describing the created node (plus ``deploy_kwargs`` when
    a deployment was attempted) or ``False`` when node creation failed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -p profile_name vm_name
    '''
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value(
        'private_key', vm_, __opts__, search_global=False, default=None
    )
    # Without a private key the deploy falls back to password auth, which
    # needs the sshpass binary -- refuse early if neither is available.
    if deploy is True and key_filename is None and \
            salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'private_key\' setting '
            'is not set and \'sshpass\' binary is not present on the '
            'system for the password.')

    # Announce the creation on the salt event bus.
    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    log.info('Creating Cloud VM {0} in {1}'.format(
        vm_['name'], vm_.get('location', DEFAULT_LOCATION)))

    ## added . for fqdn hostnames
    salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9-.')
    kwargs = {
        'name': vm_['name'],
        'image': get_image(vm_),
        'size': get_size(vm_),
        'location': vm_.get('location', DEFAULT_LOCATION)
    }

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {'kwargs': kwargs},
    )

    try:
        data = create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on JOYENT\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: \n{1}'.format(vm_['name'], str(exc)),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    ret = {}

    def __query_node_data(vm_id, vm_location):
        # Poll helper for wait_for_ip(): re-query the machine until it is
        # 'running' and has at least one IP assigned.  Returning False
        # aborts the wait with an error; returning None (falling through)
        # triggers another poll iteration.
        rcode, data = query2(
            command='my/machines/{0}'.format(vm_id),
            method='GET',
            location=vm_location
        )
        if rcode not in VALID_RESPONSE_CODES:
            # Trigger a wait for IP error
            return False
        if data['state'] != 'running':
            # Still not running, trigger another iteration
            return
        if isinstance(data['ips'], list) and len(data['ips']) > 0:
            return data

    if 'ips' in data:
        # An empty 'ips' list means the newer asynchronous creation API is
        # in use and addresses are assigned after the create call returns.
        if isinstance(data['ips'], list) and len(data['ips']) <= 0:
            log.info('New joyent asynchronous machine creation api detected...'
                     '\n\t\t-- please wait for IP addresses to be assigned...')
        try:
            data = salt.utils.cloud.wait_for_ip(
                __query_node_data,
                update_args=(data['id'], vm_.get('location', DEFAULT_LOCATION)),
                timeout=config.get_cloud_config_value(
                    'wait_for_ip_timeout', vm_, __opts__, default=5 * 60),
                interval=config.get_cloud_config_value(
                    'wait_for_ip_interval', vm_, __opts__, default=1),
            )
        except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
            try:
                # It might be already up, let's destroy it!
                destroy(vm_['name'])
            except SaltCloudSystemExit:
                pass
            finally:
                # NOTE(review): exc.message is a Python 2-only attribute.
                raise SaltCloudSystemExit(exc.message)

    data = reformat_node(data)

    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root')

    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        # Pick the address to deploy over, honouring ssh_interface
        # (public by default, private when requested).
        host = data['public_ips'][0]
        if ssh_interface(vm_) == 'private_ips':
            host = data['private_ips'][0]

        deploy_script = script(vm_)
        deploy_kwargs = {
            'host': host,
            'username': ssh_username,
            'key_filename': key_filename,
            'script': deploy_script.script,
            'name': vm_['name'],
            'tmp_dir': config.get_cloud_config_value(
                'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'sudo': config.get_cloud_config_value(
                'sudo', vm_, __opts__, default=(ssh_username != 'root')),
            'sudo_password': config.get_cloud_config_value(
                'sudo_password', vm_, __opts__, default=None),
            'tty': config.get_cloud_config_value(
                'tty', vm_, __opts__, default=True),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__),
            'script_env': config.get_cloud_config_value(
                'script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Check for Windows install params
        win_installer = config.get_cloud_config_value(
            'win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator')
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default='')

        # Store what was used to the deploy the VM, but strip credentials
        # before anything is written to the event bus / returned.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event(
            'event',
            'executing deploy script',
            'salt/cloud/{0}/deploying'.format(vm_['name']),
            {'kwargs': event_kwargs},
        )

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error('Failed to start Salt on Cloud VM {0}'.format(
                vm_['name']))

    ret.update(data)

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    return ret
data = salt.cloud.utils.wait_for_ip( __query_node_data, update_args=(vm_, data, floating), timeout=config.get_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_config_value( 'wait_for_ip_interval', vm_, __opts__, default=10), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(exc.message) log.debug('VM is now running') if ssh_interface(vm_) == 'private_ips': ip_address = preferred_ip(vm_, data.private_ips) elif (rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips'): ip_address = data.public_ips else: ip_address = preferred_ip(vm_, data.public_ips) log.debug('Using IP address {0}'.format(ip_address)) if not ip_address: raise SaltCloudSystemExit('A valid IP address was not found') deploy_kwargs = {
def create(vm_):
    '''
    Create a single Rackspace VM from a data dict.

    vm_
        Profile/VM definition dict (``name``, ``profile``, ``provider``,
        ``image``, ``size``, ``priv_key``/``pub_key`` plus deploy options
        resolved through ``config.get_cloud_config_value``).

    Returns a dict of node details (merged from the libcloud node's
    ``__dict__``, plus ``deploy_kwargs`` when a deploy was attempted) or
    ``False`` when the libcloud create call failed.
    '''
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    # Rackspace deploys authenticate with the generated admin password,
    # which requires the sshpass binary on the master.
    if deploy is True and salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'sshpass\' binary is not '
            'present on the system.')

    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    conn = get_conn()
    kwargs = {
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_)
    }

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {
            'kwargs': {
                'name': kwargs['name'],
                'image': kwargs['image'].name,
                'size': kwargs['size'].name
            }
        },
    )

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on RACKSPACE\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n{1}'.format(vm_['name'], exc),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    def __query_node_data(vm_, data):
        # Poll helper for wait_for_ip(): refresh the node listing until the
        # instance is running and an address on the requested interface is
        # known.  False aborts the wait; None triggers another iteration.
        try:
            nodelist = list_nodes()
            log.debug('Loaded node data for {0}:\n{1}'.format(
                vm_['name'], pprint.pformat(nodelist[vm_['name']])))
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(err),
                # Show the traceback if the debug logging level is enabled
                exc_info=log.isEnabledFor(logging.DEBUG))
            # Trigger a failure in the wait for IP function
            return False

        running = nodelist[vm_['name']]['state'] == node_state(
            NodeState.RUNNING)
        if not running:
            # Still not running, trigger another iteration
            return

        private = nodelist[vm_['name']]['private_ips']
        public = nodelist[vm_['name']]['public_ips']

        if private and not public:
            # Some responses report only private IPs even when a public
            # address exists; reclassify each one.
            log.warn('Private IPs returned, but not public... Checking for '
                     'misidentified IPs')
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.utils.cloud.is_public_ip(private_ip):
                    log.warn('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                else:
                    log.warn('{0} is a private IP'.format(private_ip))
                    if private_ip not in data.private_ips:
                        data.private_ips.append(private_ip)

            if ssh_interface(vm_) == 'private_ips' and data.private_ips:
                return data

        if private:
            data.private_ips = private
            if ssh_interface(vm_) == 'private_ips':
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != 'private_ips':
                return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=25 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            # NOTE(review): exc.message is a Python 2-only attribute.
            raise SaltCloudSystemExit(exc.message)

    log.debug('VM is now running')
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('No IP addresses could be found.')

    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root')

    ret = {}
    if deploy is True:
        deploy_script = script(vm_)
        deploy_kwargs = {
            'host': ip_address,
            'username': ssh_username,
            # Admin password generated by Rackspace for the new node.
            'password': data.extra['password'],
            'script': deploy_script.script,
            'name': vm_['name'],
            'tmp_dir': config.get_cloud_config_value(
                'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'sudo': config.get_cloud_config_value(
                'sudo', vm_, __opts__, default=(ssh_username != 'root')),
            'sudo_password': config.get_cloud_config_value(
                'sudo_password', vm_, __opts__, default=None),
            'tty': config.get_cloud_config_value(
                'tty', vm_, __opts__, default=False),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__),
            'script_env': config.get_cloud_config_value(
                'script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Check for Windows install params
        win_installer = config.get_cloud_config_value(
            'win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator')
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default='')

        # Store what was used to the deploy the VM, stripping credentials
        # before the data is fired on the event bus / returned.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event(
            'event',
            'executing deploy script',
            'salt/cloud/{0}/deploying'.format(vm_['name']),
            {'kwargs': event_kwargs},
        )

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error('Failed to deploy and start Salt on Cloud VM {0}'.format(
                vm_['name']))

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data.__dict__)))

    ret.update(data.__dict__)

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    return ret
def create(vm_): ''' Create a single VM from a data dict ''' deploy = config.get_config_value('deploy', vm_, __opts__) key_filename = config.get_config_value( 'ssh_key_file', vm_, __opts__, search_global=False, default=None ) if key_filename is not None and not os.path.isfile(key_filename): raise SaltCloudConfigError( 'The defined ssh_key_file {0!r} does not exist'.format( key_filename ) ) if deploy is True and key_filename is None and \ salt.utils.which('sshpass') is None: raise SaltCloudSystemExit( 'Cannot deploy salt in a VM if the \'ssh_key_file\' setting ' 'is not set and \'sshpass\' binary is not present on the ' 'system for the password.' ) salt.cloud.utils.fire_event( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), { 'name': vm_['name'], 'profile': vm_['profile'], 'provider': vm_['provider'], }, ) log.info('Creating Cloud VM {0}'.format(vm_['name'])) salt.cloud.utils.check_name(vm_['name'], 'a-zA-Z0-9._-') conn = get_conn() kwargs = { 'name': vm_['name'] } try: kwargs['image'] = get_image(conn, vm_) except Exception as exc: log.error( 'Error creating {0} on OPENSTACK\n\n' 'Could not find image {1}: {2}\n'.format( vm_['name'], vm_['image'], exc ), # Show the traceback if the debug logging level is enabled exc_info=log.isEnabledFor(logging.DEBUG) ) return False try: kwargs['size'] = get_size(conn, vm_) except Exception as exc: log.error( 'Error creating {0} on OPENSTACK\n\n' 'Could not find size {1}: {2}\n'.format( vm_['name'], vm_['size'], exc ), # Show the traceback if the debug logging level is enabled exc_info=log.isEnabledFor(logging.DEBUG) ) return False kwargs['ex_keyname'] = config.get_config_value( 'ssh_key_name', vm_, __opts__, search_global=False ) security_groups = config.get_config_value( 'security_groups', vm_, __opts__, search_global=False ) if security_groups is not None: vm_groups = security_groups.split(',') avail_groups = conn.ex_list_security_groups() group_list = [] for vmg in vm_groups: if vmg in [ag.name for 
ag in avail_groups]: group_list.append(vmg) else: raise SaltCloudNotFound( 'No such security group: \'{0}\''.format(vmg) ) kwargs['ex_security_groups'] = [ g for g in avail_groups if g.name in group_list ] networks = config.get_config_value( 'networks', vm_, __opts__, search_global=False ) floating = [] if HAS014 and networks is not None: for net in networks: if 'fixed' in net: kwargs['networks'] = [ OpenStackNetwork(n, None, None, None) for n in net['fixed'] ] elif 'floating' in net: pool = OpenStack_1_1_FloatingIpPool( net['floating'], conn.connection ) for idx in pool.list_floating_ips(): if idx.node_id is None: floating.append(idx) if not floating: # Note(pabelanger): We have no available floating IPs. For # now, we raise an execption and exit. A future enhancement # might be to allow salt-cloud to dynamically allociate new # address but that might be tricky to manage. raise SaltCloudSystemExit( "Floating pool '%s' has not more address available, " "please create some more or use a different pool." % net['floating'] ) files = config.get_config_value( 'files', vm_, __opts__, search_global=False ) if files: kwargs['ex_files'] = {} for src_path in files: with salt.utils.fopen(files[src_path], 'r') as fp_: kwargs['ex_files'][src_path] = fp_.read() userdata_file = config.get_config_value( 'userdata_file', vm_, __opts__, search_global=False ) if userdata_file is not None: with salt.utils.fopen(userdata_file, 'r') as fp: kwargs['ex_userdata'] = fp.read() salt.cloud.utils.fire_event( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), {'kwargs': {'name': kwargs['name'], 'image': kwargs['image'].name, 'size': kwargs['size'].name}}, ) kwargs['ex_metadata'] = config.get_config_value( 'metadata', vm_, __opts__, default={}, search_global=False ) if not isinstance(kwargs['ex_metadata'], dict): raise SaltCloudConfigError( '\'metadata\' should be a dict.' 
) try: data = conn.create_node(**kwargs) except Exception as exc: log.error( 'Error creating {0} on OpenStack\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: {1}\n'.format( vm_['name'], exc ), # Show the traceback if the debug logging level is enabled exc_info=log.isEnabledFor(logging.DEBUG) ) return False def __query_node_data(vm_, data, floating): try: nodelist = list_nodes() log.debug( 'Loaded node data for {0}:\n{1}'.format( vm_['name'], pprint.pformat( nodelist[vm_['name']] ) ) ) except Exception, err: log.error( 'Failed to get nodes list: {0}'.format( err ), # Show the traceback if the debug logging level is enabled exc_info=log.isEnabledFor(logging.DEBUG) ) # Trigger a failure in the wait for IP function return False running = nodelist[vm_['name']]['state'] == node_state( NodeState.RUNNING ) if not running: # Still not running, trigger another iteration return if rackconnect(vm_) is True: extra = nodelist[vm_['name']].get('extra') rc_status = extra.get('metadata').get('rackconnect_automation_status') access_ip = extra.get('access_ip') if rc_status != 'DEPLOYED': log.debug('Waiting for Rackconnect automation to complete') return if managedcloud(vm_) is True: extra = nodelist[vm_['name']].get('extra') mc_status = extra.get('metadata').get('rax_service_level_automation') if mc_status != 'Complete': log.debug('Waiting for managed cloud automation to complete') return if floating: try: name = data.name ip = floating[0].ip_address conn.ex_attach_floating_ip_to_node(data, ip) log.info( 'Attaching floating IP "{0}" to node "{1}"'.format(ip, name) ) except Exception: # Note(pabelanger): Because we loop, we only want to attach the # floating IP address one. So, expect failures if the IP is # already attached. pass result = [] private = nodelist[vm_['name']]['private_ips'] public = nodelist[vm_['name']]['public_ips'] if private and not public: log.warn( 'Private IPs returned, but not public... 
Checking for ' 'misidentified IPs' ) for private_ip in private: private_ip = preferred_ip(vm_, [private_ip]) if salt.cloud.utils.is_public_ip(private_ip): log.warn('{0} is a public IP'.format(private_ip)) data.public_ips.append(private_ip) log.warn( 'Public IP address was not ready when we last checked. Appending public IP address now.' ) public = data.public_ips else: log.warn('{0} is a private IP'.format(private_ip)) ignore_ip = ignore_cidr(vm_, private_ip) if private_ip not in data.private_ips and not ignore_ip: result.append(private_ip) if rackconnect(vm_) is True: if ssh_interface(vm_) != 'private_ips': data.public_ips = access_ip return data if result: log.debug('result = {0}'.format(result)) data.private_ips = result if ssh_interface(vm_) == 'private_ips': return data if public: data.public_ips = public if ssh_interface(vm_) != 'private_ips': return data
def request_instance(vm_=None, call=None):
    '''
    Put together all of the information necessary to request an instance
    through Novaclient and then fire off the request the instance.

    Returns data about the instance

    vm_
        Profile/VM definition dict; reads ``name``, ``image``, ``size`` and
        the various nova settings via ``config.get_cloud_config_value``.
    call
        Salt-cloud dispatch mode; ``'function'`` is rejected.

    Side effects: fires a ``requesting`` event on the salt bus and stores
    the generated admin password in ``vm_['password']``.

    Returns ``(data, vm_)`` where *data* is the novaclient server object.
    Raises SaltCloudSystemExit on any failure to resolve the image/size or
    to boot, and SaltCloudNotFound for an unknown security group.
    '''
    if call == 'function':
        # Technically this function may be called other ways too, but it
        # definitely cannot be called with --function.
        raise SaltCloudSystemExit(
            'The request_instance action must be called with -a or --action.')
    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-')
    conn = get_conn()
    kwargs = {'name': vm_['name']}

    try:
        kwargs['image_id'] = get_image(conn, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit('Error creating {0} on OPENSTACK\n\n'
                                  'Could not find image {1}: {2}\n'.format(
                                      vm_['name'], vm_['image'], exc))

    try:
        kwargs['flavor_id'] = get_size(conn, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit('Error creating {0} on OPENSTACK\n\n'
                                  'Could not find size {1}: {2}\n'.format(
                                      vm_['name'], vm_['size'], exc))

    kwargs['key_name'] = config.get_cloud_config_value(
        'ssh_key_name', vm_, __opts__, search_global=False)

    security_groups = config.get_cloud_config_value(
        'security_groups', vm_, __opts__, search_global=False)
    if security_groups is not None:
        vm_groups = security_groups.split(',')
        # secgroup_list() returns a mapping keyed by group name, so test
        # membership directly instead of materializing the keys into a
        # throwaway list on every iteration (the old code also used the
        # Python-2-only iteritems()).
        avail_groups = conn.secgroup_list()
        group_list = []
        for vmg in vm_groups:
            if vmg in avail_groups:
                group_list.append(vmg)
            else:
                raise SaltCloudNotFound(
                    'No such security group: \'{0}\''.format(vmg))

        kwargs['security_groups'] = group_list

    avz = config.get_cloud_config_value(
        'availability_zone', vm_, __opts__, default=None, search_global=False)
    if avz is not None:
        kwargs['availability_zone'] = avz

    # NOTE(review): 'networks' is resolved but never used below -- confirm
    # against the full module before removing.
    networks = config.get_cloud_config_value(
        'networks', vm_, __opts__, search_global=False)

    files = config.get_cloud_config_value(
        'files', vm_, __opts__, search_global=False)
    if files:
        kwargs['files'] = {}
        for src_path in files:
            # Values may be paths to local files or literal contents.
            if os.path.exists(files[src_path]):
                with salt.utils.fopen(files[src_path], 'r') as fp_:
                    kwargs['files'][src_path] = fp_.read()
            else:
                kwargs['files'][src_path] = files[src_path]

    userdata_file = config.get_cloud_config_value(
        'userdata_file', vm_, __opts__, search_global=False)
    if userdata_file is not None:
        with salt.utils.fopen(userdata_file, 'r') as fp:
            kwargs['userdata'] = fp.read()

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {
            'kwargs': {
                'name': kwargs['name'],
                'image': kwargs['image_id'],
                'size': kwargs['flavor_id']
            }
        },
        transport=__opts__['transport'])

    try:
        data = conn.boot(**kwargs)
    except Exception as exc:
        raise SaltCloudSystemExit(
            'Error creating {0} on Nova\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(vm_['name'], exc))

    # Stash the generated admin password so the deploy step can use it.
    vm_['password'] = data.extra.get('password', '')
    return data, vm_
def __init__(self, username, project_id, auth_url, region_name=None,
             password=None, os_auth_plugin=None, **kwargs):
    '''
    Set up nova credentials

    Builds the keyword set for ``novaclient.client.Client``, authenticates
    once to capture an auth token and the service catalog, then creates one
    client bound to the 'compute' service and one bound to 'volume'.

    Extra **kwargs are passed straight through to novaclient (after
    sanatize_novaclient() strips what it does not accept).
    '''
    if not HAS_NOVA:
        # novaclient is not importable; leave the object unconfigured.
        return None

    self.kwargs = kwargs.copy()
    # Only discover/register novaclient extensions once per process: the
    # hooks map is module-global, so re-registering would duplicate hooks.
    if not novaclient.utils.HookableMixin._hooks_map:
        self.extensions = self._discover_extensions('1.1')
        for extension in self.extensions:
            extension.run_hooks('__pre_parse_args__')
        self.kwargs['extensions'] = self.extensions

    self.kwargs['username'] = username
    self.kwargs['project_id'] = project_id
    self.kwargs['auth_url'] = auth_url
    self.kwargs['region_name'] = region_name
    self.kwargs['service_type'] = 'compute'

    # used in novaclient extensions to see if they are rackspace or not,
    # to know if it needs to load the hooks for that extension or not.
    # This is cleaned up by sanatize_novaclient
    self.kwargs['os_auth_url'] = auth_url

    if os_auth_plugin is not None:
        # Non-keystone auth (e.g. rackspace): load the named plugin.
        novaclient.auth_plugin.discover_auth_systems()
        auth_plugin = novaclient.auth_plugin.load_plugin(os_auth_plugin)
        self.kwargs['auth_plugin'] = auth_plugin
        self.kwargs['auth_system'] = os_auth_plugin

    if not self.kwargs.get('api_key', None):
        # Fall back to the password when no explicit api_key was given.
        self.kwargs['api_key'] = password

    # This has to be run before sanatize_novaclient before extra variables
    # are cleaned out.
    if hasattr(self, 'extensions'):
        # needs an object, not a dictionary
        self.kwargstruct = KwargsStruct(**self.kwargs)
        for extension in self.extensions:
            extension.run_hooks('__post_parse_args__', self.kwargstruct)
        self.kwargs = self.kwargstruct.__dict__

    self.kwargs = sanatize_novaclient(self.kwargs)

    # The context-manager protocol is used below, so require a novaclient
    # new enough to provide __exit__.
    if not hasattr(client.Client, '__exit__'):
        raise SaltCloudSystemExit(
            "Newer version of novaclient required for __exit__.")

    # Authenticate once to harvest the token and the service catalog.
    with client.Client(**self.kwargs) as conn:
        try:
            conn.client.authenticate()
        except novaclient.exceptions.AmbiguousEndpoints:
            raise SaltCloudSystemExit(
                "Nova provider requires a 'region_name' to be specified")

        self.kwargs['auth_token'] = conn.client.auth_token
        self.catalog = \
            conn.client.service_catalog.catalog['access']['serviceCatalog']

    # Pin the compute client to the region's endpoint when a region is set.
    if region_name is not None:
        servers_endpoints = get_entry(
            self.catalog, 'type', 'compute')['endpoints']
        self.kwargs['bypass_url'] = get_entry(
            servers_endpoints, 'region', region_name)['publicURL']

    self.compute_conn = client.Client(**self.kwargs)

    # Second client for the 'volume' service, likewise region-pinned.
    if region_name is not None:
        servers_endpoints = get_entry(
            self.catalog, 'type', 'volume')['endpoints']
        self.kwargs['bypass_url'] = get_entry(
            servers_endpoints, 'region', region_name)['publicURL']

    self.kwargs['service_type'] = 'volume'
    self.volume_conn = client.Client(**self.kwargs)

    if hasattr(self, 'extensions'):
        self.expand_extensions()
def list_nodes_full(call=None):
    '''
    Return a list of the VMs that are on the provider

    Queries the Aliyun API for all instances in the configured region and
    returns a dict keyed by instance id, each value holding the salt-cloud
    standard fields (id, name, image, size, state, public_ips, private_ips)
    plus every raw attribute returned by DescribeInstanceAttribute.

    The result is also written to the salt-cloud node cache.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f '
            'or --function.')

    ret = {}
    location = get_location()
    params = {
        'Action': 'DescribeInstanceStatus',
        'RegionId': location,
    }
    result = query(params=params)

    log.debug('Total {0} instance found in Region {1}'.format(
        result['TotalCount'], location))
    # An error response carries a 'Code' key; nothing to report either way.
    if 'Code' in result or result['TotalCount'] == 0:
        return ret

    for node in result['InstanceStatuses']['InstanceStatus']:
        instanceId = node.get('InstanceId', '')

        # One extra query per instance for its full attribute set.
        params = {
            'Action': 'DescribeInstanceAttribute',
            'InstanceId': instanceId
        }
        items = query(params=params)
        if 'Code' in items:
            log.warn('Query instance:{0} attribute failed'.format(instanceId))
            continue

        ret[instanceId] = {
            'id': items['InstanceId'],
            'name': items['InstanceName'],
            'image': items['ImageId'],
            'size': 'TODO',  # size not derivable from this response here
            'state': items['Status']
        }
        # Copy every raw attribute (stringified) alongside the standard
        # fields; the IP lists keep their original list form.
        for item in items.keys():
            value = items[item]
            if value is not None:
                value = str(value)
            if item == "PublicIpAddress":
                ret[instanceId]['public_ips'] = items[item]['IpAddress']
            if item == "InnerIpAddress":
                ret[instanceId]['private_ips'] = items[item]['IpAddress']
            ret[instanceId][item] = value

    # Strip any ':driver' suffix from the active provider name.
    provider = __active_provider_name__ or 'aliyun'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]

    __opts__['update_cachedir'] = True

    salt.utils.cloud.cache_node_list(ret, provider, __opts__)

    return ret
def get_entry(dict_, key, value):
    '''
    Return the first entry of *dict_* (an iterable of mappings, e.g. a
    service catalog) whose ``entry[key]`` equals *value*.

    Raises SaltCloudSystemExit when no entry matches.
    '''
    match = next(
        (entry for entry in dict_ if entry[key] == value),
        None
    )
    if match is None:
        raise SaltCloudSystemExit('Unable to find {0} in {1}.'.format(key, dict_))
    return match
def avail_images(call=None):
    '''
    Return a dict of all available VM images on the cloud provider.

    The catalog is static.  Returns ``{'Operating System': {id: {'id': id,
    'name': name}, ...}}``.  Raises SaltCloudSystemExit when invoked as an
    action (-a).
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option')

    # Single (id, name) table: the returned records repeat the id, so build
    # them from one source of truth instead of hand-writing each
    # {'id': ..., 'name': ...} pair (which invited key/id mismatch typos).
    images = (
        ('13962', 'CentOS 6.0 - Minimal Install (32 bit)'),
        ('13963', 'CentOS 6.0 - Minimal Install (64 bit)'),
        ('13960', 'CentOS 6.0 - LAMP Install (32 bit)'),
        ('13961', 'CentOS 6.0 - LAMP Install (64 bit)'),
        ('1930', 'CentOS 5 - Minimal Install (32 bit)'),
        ('1931', 'CentOS 5 - Minimal Install (64 bit)'),
        ('1928', 'CentOS 5 - LAMP Install (32 bit)'),
        ('1929', 'CentOS 5 - LAMP Install (64 bit)'),
        ('14075', 'Debian GNU/Linux 6.0 Squeeze/Stable - Minimal Install (32 bit)'),
        ('14077', 'Debian GNU/Linux 6.0 Squeeze/Stable - Minimal Install (64 bit)'),
        ('14074', 'Debian GNU/Linux 6.0 Squeeze/Stable - LAMP Install (32 bit)'),
        ('14076', 'Debian GNU/Linux 6.0 Squeeze/Stable - LAMP Install (64 bit)'),
        ('21774', 'CloudLinux 6 (32 bit)'),
        ('21777', 'CloudLinux 6 (64 bit)'),
        ('21768', 'CloudLinux 5 (32 bit)'),
        ('21771', 'CloudLinux 5 (64 bit)'),
        ('22247', 'Debian GNU/Linux 7.0 Wheezy/Stable - Minimal Install (32 bit)'),
        ('22251', 'Debian GNU/Linux 7.0 Wheezy/Stable - Minimal Install (64 bit)'),
        ('21265', 'FreeBSD 9 Latest (32 bit)'),
        ('21269', 'FreeBSD 9 Latest (64 bit)'),
        ('21257', 'FreeBSD 8 Latest (32 bit)'),
        ('21261', 'FreeBSD 8 Latest (64 bit)'),
        ('2143', 'Ubuntu Linux 10.04 LTS Lucid Lynx - Minimal Install (32 bit)'),
        ('2145', 'Ubuntu Linux 10.04 LTS Lucid Lynx - Minimal Install (64 bit)'),
        ('2138', 'Ubuntu Linux 10.04 LTS Lucid Lynx - LAMP Install (32 bit)'),
        ('2141', 'Ubuntu Linux 10.04 LTS Lucid Lynx - LAMP Install (64 bit)'),
        ('17436', 'Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (32 bit)'),
        ('17438', 'Ubuntu Linux 12.04 LTS Precise Pangolin - Minimal Install (64 bit)'),
        ('17432', 'Ubuntu Linux 12.04 LTS Precise Pangolin - LAMP Install (32 bit)'),
        ('17434', 'Ubuntu Linux 12.04 LTS Precise Pangolin - LAMP Install (64 bit)'),
        ('20948', 'Windows Server 2012 Standard Edition (64 bit)'),
        ('21074', 'Windows Server 2008 Standard SP1 with R2 (64 bit)'),
        ('1857', 'Windows Server 2008 R2 Standard Edition (64bit)'),
        ('1860', 'Windows Server 2008 R2 Enterprise Edition (64bit)'),
        ('1742', 'Windows Server 2008 Standard Edition SP2 (32bit)'),
        ('1752', 'Windows Server 2008 Standard Edition SP2 (64bit)'),
        ('1756', 'Windows Server 2008 Enterprise Edition SP2 (32bit)'),
        ('1761', 'Windows Server 2008 Enterprise Edition SP2 (64bit)'),
        ('1766', 'Windows Server 2008 Datacenter Edition SP2 (32bit)'),
        ('1770', 'Windows Server 2008 Datacenter Edition SP2 (64bit)'),
        ('21060', 'Windows Server 2012 Datacenter Edition With Hyper-V (64bit)'),
        ('20971', 'Windows Server 2012 Datacenter Edition (64bit)'),
        ('21644', 'Windows Server 2008 R2 Datacenter Edition With Hyper-V (64bit)'),
        ('13866', 'Windows Server 2008 R2 Datacenter Edition (64bit)'),
        ('1700', 'Windows Server 2003 Standard SP2 with R2 (32 bit)'),
        ('1701', 'Windows Server 2003 Standard SP2 with R2 (64 bit)'),
        ('1716', 'Windows Server 2003 Datacenter SP2 with R2 (32 bit)'),
        ('1715', 'Windows Server 2003 Datacenter SP2 with R2 (64 bit)'),
        ('1702', 'Windows Server 2003 Enterprise SP2 with R2 (32 bit)'),
        ('1703', 'Windows Server 2003 Enterprise SP2 with R2 (64 bit)'),
        ('22418', 'Citrix XenServer 6.2'),
        ('21133', 'Citrix XenServer 6.1'),
        ('17228', 'Citrix XenServer 6.0.2'),
        ('14059', 'Citrix XenServer 6.0.0'),
        ('13891', 'Citrix XenServer 5.6.2'),
        ('2380', 'Citrix XenServer 5.6.1'),
        ('2214', 'Citrix XenServer 5.6'),
        ('1806', 'Citrix XenServer 5.5'),
        ('21158', 'VMware ESXi 5.1'),
        ('14048', 'VMware ESX 4.1'),
        ('2032', 'VMware ESX 4.0'),
        ('21396', 'Vyatta 6.5 Community Edition (64 bit)'),
        ('22177', 'Vyatta 6.x Subscription Edition (64 bit)'),
    )
    return {
        'Operating System': dict(
            (image_id, {'id': image_id, 'name': image_name})
            for image_id, image_name in images
        )
    }
def create(vm_):
    '''
    Create a single VM from a data dict (PARALLELS driver).

    vm_ -- the profile/provider data dict for the VM (must contain at least
    'name', 'profile', 'provider', 'priv_key' and 'pub_key').

    Returns the final node data dict on success, ``False`` if node creation
    raised, or an ``{'Error': ...}`` dict if the node never reached the
    CREATED/STARTED state in time.
    '''
    # Deployment over SSH is driven by sshpass; bail out early if it is
    # requested but the binary is missing.
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    if deploy is True and salt.utils.which('sshpass') is None:
        raise SaltCloudSystemExit(
            'Cannot deploy salt in a VM if the \'sshpass\' binary is not '
            'present on the system.')

    # Announce the creation on the Salt event bus.
    salt.utils.cloud.fire_event('event',
                                'starting create',
                                'salt/cloud/{0}/creating'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['provider'],
                                },
                                transport=__opts__['transport'])

    log.info('Creating Cloud VM {0}'.format(vm_['name']))

    try:
        data = create_node(vm_)
    except Exception as exc:
        # NOTE(review): exc.message is deprecated (removed in Python 3);
        # prefer str(exc) when this module is modernized.
        log.error(
            'Error creating {0} on PARALLELS\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: \n{1}'.format(vm_['name'], exc.message),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    # Poll until the container exists, then power it on and wait for it to run.
    name = vm_['name']
    if not wait_until(name, 'CREATED'):
        return {'Error': 'Unable to start {0}, command timed out'.format(name)}
    start(vm_['name'], call='action')
    if not wait_until(name, 'STARTED'):
        return {'Error': 'Unable to start {0}, command timed out'.format(name)}

    def __query_node_data(vm_name):
        # Callback for wait_for_ip: returning a falsy value means
        # "not ready yet, poll again".
        data = show_instance(vm_name, call='action')
        if 'public-ip' not in data['network']:
            # Trigger another iteration
            return
        return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_['name'], ),
            timeout=config.get_cloud_config_value('wait_for_ip_timeout', vm_, __opts__, default=5 * 60),
            interval=config.get_cloud_config_value('wait_for_ip_interval', vm_, __opts__, default=5),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(exc.message)

    # The address is reported CIDR-style ("a.b.c.d/nn"); keep only the IP.
    comps = data['network']['public-ip']['address'].split('/')
    public_ip = comps[0]

    ssh_username = config.get_cloud_config_value('ssh_username', vm_, __opts__, default='root')

    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        # Arguments handed to salt.utils.cloud.deploy_script /
        # deploy_windows below.
        deploy_kwargs = {
            'opts': __opts__,
            'host': public_ip,
            'username': ssh_username,
            'password': config.get_cloud_config_value('password', vm_, __opts__, search_global=False),
            # NOTE(review): other drivers in this file pass
            # deploy_script.script here — confirm what script() returns for
            # this driver.
            'script': deploy_script,
            'name': vm_['name'],
            'tmp_dir': config.get_cloud_config_value('tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            # Non-root logins need sudo by default.
            'sudo': config.get_cloud_config_value('sudo', vm_, __opts__, default=(ssh_username != 'root')),
            'sudo_password': config.get_cloud_config_value('sudo_password', vm_, __opts__, default=None),
            'tty': config.get_cloud_config_value('tty', vm_, __opts__, default=False),
            'display_ssh_output': config.get_cloud_config_value('display_ssh_output', vm_, __opts__, default=True),
            'script_args': config.get_cloud_config_value('script_args', vm_, __opts__),
            'script_env': config.get_cloud_config_value('script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Check for Windows install params
        win_installer = config.get_cloud_config_value('win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator')
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default='')

        # Store what was used to the deploy the VM, minus the secrets
        # (keys, passwords) so they never hit the event bus.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']

        salt.utils.cloud.fire_event('event',
                                    'executing deploy script',
                                    'salt/cloud/{0}/deploying'.format(
                                        vm_['name']),
                                    {'kwargs': event_kwargs},
                                    transport=__opts__['transport'])

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        # A failed deploy is logged but does not fail the create: the VM
        # itself exists and is returned below.
        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error('Failed to start Salt on Cloud VM {0}'.format(
                vm_['name']))

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event('event',
                                'created instance',
                                'salt/cloud/{0}/created'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['provider'],
                                },
                                transport=__opts__['transport'])

    return data
def create(vm_):
    '''
    Create a single VM from a data dict (OpenNebula driver).

    vm_ -- the profile/provider data dict for the VM (must contain at least
    'name', 'profile', 'provider', 'priv_key' and 'pub_key').

    Returns a dict of node data (plus 'deploy_kwargs' when a deployment was
    performed), or ``False`` if the XML-RPC instantiation failed.
    '''
    # Announce the creation on the Salt event bus.
    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    kwargs = {
        'name': vm_['name'],
        'image_id': get_image(vm_),
        'region_id': get_location(vm_),
    }

    # Validate the optional SSH key file before talking to the API.
    key_filename = config.get_cloud_config_value('ssh_key_file', vm_, __opts__, search_global=False, default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename {0!r} does not exist'.format(
                key_filename))

    private_networking = config.get_cloud_config_value('private_networking', vm_, __opts__, search_global=False, default=None)
    kwargs['private_networking'] = 'true' if private_networking else 'false'

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {'kwargs': kwargs},
    )

    # Pin the VM to a host via an OpenNebula scheduler requirement when a
    # location was configured.
    region = ''
    if kwargs['region_id'] is not None:
        region = 'SCHED_REQUIREMENTS="ID={0}"'.format(kwargs['region_id'])
    try:
        # Instantiate the template over XML-RPC; index [1] of the response
        # is the new VM's payload.
        server, user, password = _get_xml_rpc()
        ret = server.one.template.instantiate(user + ':' + password, int(kwargs['image_id']), kwargs['name'], False, region)[1]
    except Exception as exc:
        # NOTE(review): exc.message is deprecated (removed in Python 3);
        # prefer str(exc) when this module is modernized.
        log.error(
            'Error creating {0} on OpenNebula\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: {1}'.format(vm_['name'], exc.message),
            # Show the traceback if the debug logging level is enabled
            exc_info=log.isEnabledFor(logging.DEBUG))
        return False

    def __query_node_data(vm_name):
        # Callback for wait_for_ip: False aborts with an error, None polls
        # again, a dict means the node is ready.
        data = _get_node(vm_name)
        if not data:
            # Trigger an error in the wait_for_ip function
            return False
        # NOTE(review): '7' presumably the DONE/failed state and lcm_state
        # '3' the RUNNING state — confirm against OpenNebula state codes.
        if data['state'] == '7':
            return False
        if data['lcm_state'] == '3':
            return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_['name'], ),
            timeout=config.get_cloud_config_value('wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value('wait_for_ip_interval', vm_, __opts__, default=2),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(exc.message)

    ssh_username = config.get_cloud_config_value('ssh_username', vm_, __opts__, default='root')

    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        # Arguments handed to salt.utils.cloud.deploy_script /
        # deploy_windows below.  Note the deployment connects to the
        # first private IP reported for the node.
        deploy_kwargs = {
            'host': data['private_ips'][0],
            'username': ssh_username,
            'key_filename': key_filename,
            'script': deploy_script,
            'name': vm_['name'],
            'tmp_dir': config.get_cloud_config_value('tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'display_ssh_output': config.get_cloud_config_value('display_ssh_output', vm_, __opts__, default=True),
            # Non-root logins need sudo by default.
            'sudo': config.get_cloud_config_value('sudo', vm_, __opts__, default=(ssh_username != 'root')),
            'sudo_password': config.get_cloud_config_value('sudo_password', vm_, __opts__, default=None),
            'tty': config.get_cloud_config_value('tty', vm_, __opts__, default=False),
            'script_args': config.get_cloud_config_value('script_args', vm_, __opts__),
            'script_env': config.get_cloud_config_value('script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Check for Windows install params
        win_installer = config.get_cloud_config_value('win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator')
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default='')

        # Store what was used to the deploy the VM, minus the secrets
        # (keys, passwords) so they never hit the event bus.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        # The XML-RPC payload in ret is replaced by a result dict carrying
        # the sanitized deploy arguments.
        ret = {}
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event(
            'event',
            'executing deploy script',
            'salt/cloud/{0}/deploying'.format(vm_['name']),
            {'kwargs': event_kwargs},
        )

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        # A failed deploy is logged but does not fail the create: the VM
        # itself exists and is returned below.
        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error('Failed to start Salt on Cloud VM {0}'.format(
                vm_['name']))

    # Merge the polled node data into the return payload.
    ret.update(data)

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    return ret
def destroy(vm_name, call=None):
    '''
    Call 'destroy' on the instance.  Can be called with "-a destroy" or -d

    vm_name -- name of the GCE instance to destroy.
    call -- how salt-cloud invoked this function; must be None or 'action'.

    Returns the result of libcloud's ``destroy_node``.  Raises
    SaltCloudSystemExit when invoked incorrectly, when the instance cannot
    be found, or when the destroy call itself fails.
    '''
    if call and call != 'action':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d or "-a destroy".')

    conn = get_conn()

    try:
        node = conn.ex_get_node(vm_name)
    except Exception as exc:  # pylint: disable=W0703
        # exc.message was removed in Python 3 (and is unreliable even on
        # Python 2); format the exception object itself instead.
        log.error(
            'Could not locate instance {0}\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n{1}'.format(vm_name, exc),
            exc_info=log.isEnabledFor(logging.DEBUG))
        raise SaltCloudSystemExit(
            'Could not find instance {0}.'.format(vm_name))

    salt.utils.cloud.fire_event(
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(vm_name),
        {'name': vm_name},
    )

    # Use the instance metadata to see if its salt cloud profile was
    # preserved during instance create.  If so, use the profile value
    # to see if the 'delete_boot_pd' value is set to delete the disk
    # along with the instance.
    profile = None
    # .get() guards against libcloud returning extra data without a
    # 'metadata' key at all.
    metadata = node.extra.get('metadata')
    if metadata and 'items' in metadata:
        for md in metadata['items']:
            if md['key'] == 'salt-cloud-profile':
                profile = md['value']
    vm_ = get_configured_provider()
    delete_boot_pd = False

    if profile is not None and profile in vm_['profiles']:
        if 'delete_boot_pd' in vm_['profiles'][profile]:
            delete_boot_pd = vm_['profiles'][profile]['delete_boot_pd']

    try:
        inst_deleted = conn.destroy_node(node)
    except Exception as exc:  # pylint: disable=W0703
        log.error(
            'Could not destroy instance {0}\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n{1}'.format(vm_name, exc),
            exc_info=log.isEnabledFor(logging.DEBUG))
        raise SaltCloudSystemExit(
            'Could not destroy instance {0}.'.format(vm_name))

    if delete_boot_pd:
        # The profile asked for the boot persistent disk to go with the
        # instance.
        salt.utils.cloud.fire_event(
            'event',
            'destroying persistent disk',
            'salt/cloud/{0}/destroying-disk'.format(vm_name),
            {'name': vm_name},
        )
        try:
            conn.destroy_volume(conn.ex_get_volume(vm_name))
        except Exception as exc:  # pylint: disable=W0703
            # Note that we don't raise a SaltCloudSystemExit here in order
            # to allow completion of instance deletion.  Just log the error
            # and keep going.
            log.error(
                'Could not destroy disk {0}\n\n'
                'The following exception was thrown by libcloud when trying '
                'to run the initial deployment: \n{1}'.format(
                    vm_name, exc),
                exc_info=log.isEnabledFor(logging.DEBUG))
        salt.utils.cloud.fire_event(
            'event',
            'destroyed persistent disk',
            'salt/cloud/{0}/destroyed-disk'.format(vm_name),
            {'name': vm_name},
        )

    salt.utils.cloud.fire_event(
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(vm_name),
        {'name': vm_name},
    )

    return inst_deleted