def _get_firewall_rules(firewall_rules):
    '''
    Construct a list of optional firewall rules from the cloud profile.

    firewall_rules
        Mapping of rule name -> rule properties. Each rule must define
        ``protocol``; all other properties are optional.

    Raises SaltCloudConfigError when a rule is missing ``protocol``.
    '''
    ret = []
    # .items() instead of the Python-2-only .iteritems(), which raises
    # AttributeError under Python 3.
    for key, rule in firewall_rules.items():
        # Verify the required 'protocol' property is present in the cloud
        # profile config
        if 'protocol' not in rule:
            raise SaltCloudConfigError(
                'The firewall rule \'{0}\' is missing \'protocol\''.format(key)
            )
        ret.append(FirewallRule(
            name=key,
            protocol=rule.get('protocol', None),
            source_mac=rule.get('source_mac', None),
            source_ip=rule.get('source_ip', None),
            target_ip=rule.get('target_ip', None),
            port_range_start=rule.get('port_range_start', None),
            port_range_end=rule.get('port_range_end', None),
            icmp_type=rule.get('icmp_type', None),
            icmp_code=rule.get('icmp_code', None)
        ))
    return ret
def _get_data_volumes(vm_):
    '''
    Construct a list of optional data volumes from the cloud profile.

    vm_
        The VM profile; ``vm_['volumes']`` maps volume name -> properties.
        Each volume must define ``disk_size``; ``disk_type`` defaults to
        ``HDD``; ``disk_availability_zone`` is optional.

    Raises SaltCloudConfigError when a volume is missing ``disk_size``.
    '''
    ret = []
    volumes = vm_['volumes']
    # .items() instead of the Python-2-only .iteritems(), which raises
    # AttributeError under Python 3.
    for key, props in volumes.items():
        # Verify the required 'disk_size' property is present in the cloud
        # profile config
        if 'disk_size' not in props:
            raise SaltCloudConfigError(
                'The volume \'{0}\' is missing \'disk_size\''.format(key)
            )
        # Use 'HDD' if no 'disk_type' property is present in cloud profile
        if 'disk_type' not in props:
            props['disk_type'] = 'HDD'

        # Construct volume object and assign to a list.
        volume = Volume(
            name=key,
            size=props['disk_size'],
            disk_type=props['disk_type'],
            licence_type='OTHER'
        )

        # Set volume availability zone if defined in the cloud profile
        if 'disk_availability_zone' in props:
            volume.availability_zone = props['disk_availability_zone']

        ret.append(volume)
    return ret
def _get_firewall_rules(firewall_rules):
    """
    Construct a list of optional firewall rules from the cloud profile.
    """
    rules = []
    optional_fields = (
        "source_mac",
        "source_ip",
        "target_ip",
        "port_range_start",
        "port_range_end",
        "icmp_type",
        "icmp_code",
    )
    for name in firewall_rules:
        properties = firewall_rules[name]
        # 'protocol' is the one mandatory property of a firewall rule.
        if "protocol" not in properties.keys():
            raise SaltCloudConfigError(
                "The firewall rule '{}' is missing 'protocol'".format(name))
        extras = {field: properties.get(field, None)
                  for field in optional_fields}
        rules.append(
            FirewallRule(name=name,
                         protocol=properties.get("protocol", None),
                         **extras))
    return rules
def _get_data_volumes(vm_):
    """
    Construct a list of optional data volumes from the cloud profile
    """
    ret = []
    volumes = vm_["volumes"]
    for name in volumes:
        props = volumes[name]
        # 'disk_size' is required for every declared volume.
        if "disk_size" not in props.keys():
            raise SaltCloudConfigError(
                "The volume '{}' is missing 'disk_size'".format(name))
        # Default the disk type when the profile omits it (mutates the
        # profile dict, matching prior behaviour).
        if "disk_type" not in props.keys():
            props["disk_type"] = "HDD"
        volume = Volume(
            name=name,
            size=props["disk_size"],
            disk_type=props["disk_type"],
            licence_type="OTHER",
        )
        # Availability zone is optional.
        if "disk_availability_zone" in props.keys():
            volume.availability_zone = props["disk_availability_zone"]
        ret.append(volume)
    return ret
def _get_ip(node_name):
    """
    Return the node's first configured address (IPv4 preferred, then IPv6),
    or None when the network defines neither.
    """
    try:
        network = resolve_network()
    except KeyError:
        raise CommandExecutionError(
            SaltCloudConfigError(
                "Node {0} not declared in pillar nodes.".format(node_name)))
    return next(
        (network[field]
         for field in ("ipv4_address", "ipv6_address")
         if field in network),
        None,
    )
def destroy(name, call=None):
    '''
    Destroy a node. Will check termination protection and warn if enabled.

    name
        The name of the node to destroy.

    CLI Example:

    .. code-block:: bash

        salt-cloud --destroy mymachine
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.')

    __utils__['cloud.fire_event']('event',
                                  'destroying instance',
                                  'salt/cloud/{0}/destroying'.format(name),
                                  {'name': name},
                                  transport=__opts__['transport'])

    data = show_instance(name, call='action')
    # Issue the DELETE against the provider API for this droplet.
    node = query(method='droplets',
                 droplet_id=data['id'],
                 http_method='delete')

    delete_record = config.get_cloud_config_value(
        'delete_dns_record',
        get_configured_provider(),
        __opts__,
        search_global=False,
        default=None,
    )

    # Reject truthy non-boolean config values; a falsy value (including the
    # None default) simply skips DNS record deletion.
    if delete_record and not isinstance(delete_record, bool):
        raise SaltCloudConfigError(
            '\'delete_dns_record\' should be a boolean value.')

    if delete_record:
        delete_dns_record(name)

    __utils__['cloud.fire_event']('event',
                                  'destroyed instance',
                                  'salt/cloud/{0}/destroyed'.format(name),
                                  {'name': name},
                                  transport=__opts__['transport'])

    # Drop the master-side cached minion entry when caching is enabled.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](
            name, __active_provider_name__.split(':')[0], __opts__)

    return node
def write_file(self, dest_path, contents=None, local_file=None, perms='0644'):
    '''
    Write ``contents`` (or the contents of ``local_file``) to ``dest_path``
    on the target minion in 64 KiB chunks, then apply ``perms``.

    Returns True on success, False when any remote call fails.
    Raises SaltCloudConfigError when ``local_file`` is a directory.
    '''
    log.info("iocage - writing file {} on '{}'".format(
        dest_path, self.minion_id))
    # unfortunately we have to work around this bug
    # https://github.com/saltstack/salt/issues/16592
    # which is for salt-cp but also seems to affect file.write, where files
    # that are too large will not get written... it seems the problem has to
    # do with the number of bytes written. For safety, we limit ourselves to
    # 64kb chunks.
    #
    # the rest of the code does something equivalent to this (which does not
    # work due to the bug):
    # ret = _execute_salt_module(target, 'file.write', [dest_path, contents])
    if local_file is not None:
        if os.path.isdir(local_file):
            raise SaltCloudConfigError(
                'The iocage driver does not support copying directories and {} is a directory'
                .format(local_file))
        # Context manager so the handle is closed even if read() raises
        # (the previous open()/read()/close() leaked on error).
        with open(local_file, 'r') as f:
            contents = f.read()
    log.debug("iocage - writing file {} on '{}' in multiple chunks".format(
        dest_path, self.minion_id))
    # Create (or truncate) the target file before seeking into it.
    ret = self._execute_salt_module('file.touch', [dest_path])
    # TODO add more error handling?
    if not ret:
        return False
    i = 0
    increment = 64 * 1024
    while i < len(contents):
        chunk = contents[i:(i + increment)]
        ret = self._execute_salt_module('file.seek_write',
                                        [dest_path, chunk, i])
        i += increment
        # file.seek_write returns the number of bytes written; anything
        # else means the chunk did not land in full.
        if ret != len(chunk):
            return False
    log.debug(
        "iocage - DONE writing file {} on '{}' in multiple chunks".format(
            dest_path, self.minion_id))
    ret = self._execute_salt_module('file.set_mode', [dest_path, perms])
    if not ret:
        return False
    return True
def get_key_filename(vm_):
    """
    Check SSH private key file and return absolute path if exists.
    """
    path = config.get_cloud_config_value(
        "ssh_private_key", vm_, __opts__, search_global=False, default=None
    )
    # No key configured: nothing to validate.
    if path is None:
        return None
    path = os.path.expanduser(path)
    if not os.path.isfile(path):
        raise SaltCloudConfigError(
            "The defined ssh_private_key '{}' does not exist".format(path)
        )
    return path
def get_all_properties(nodename=None):
    """
    A function to get a node pillar configuration.

    CLI Example:

        salt * node.get_all_properties
    """
    # Default to the local minion id when no node name was given.
    node = __grains__["id"] if nodename is None else nodename
    nodes = _get_all_nodes()
    if node not in nodes:
        raise CommandExecutionError(
            SaltCloudConfigError(
                "Node {0} not declared in pillar.".format(node)))
    return nodes[node]
def get_public_keys(vm_):
    """
    Retrieve list of SSH public keys.
    """
    path = config.get_cloud_config_value(
        "ssh_public_key", vm_, __opts__, search_global=False, default=None
    )
    if path is not None:
        path = os.path.expanduser(path)
        if not os.path.isfile(path):
            raise SaltCloudConfigError(
                "The defined ssh_public_key '{}' does not exist".format(path)
            )
    # One entry per line of the key file, decoded to unicode.
    with salt.utils.files.fopen(path) as rfh:
        ssh_keys = [
            salt.utils.stringutils.to_unicode(line)
            for line in rfh.readlines()
        ]
    return ssh_keys
def load_public_key(vm_):
    '''
    Load the public key file if exists.
    '''
    path = config.get_cloud_config_value(
        'ssh_public_key', vm_, __opts__, search_global=False, default=None
    )
    # No key configured: nothing to load.
    if path is None:
        return None
    path = os.path.expanduser(path)
    if not os.path.isfile(path):
        raise SaltCloudConfigError(
            'The defined ssh_public_key \'{0}\' does not exist'.format(path)
        )
    # Return the key as a single unicode line (newlines stripped).
    with salt.utils.files.fopen(path, 'r') as public_key:
        return salt.utils.stringutils.to_unicode(
            public_key.read().replace('\n', ''))
def load_public_key(vm_):
    """
    Load the public key file if exists.
    """
    filename = config.get_cloud_config_value(
        "ssh_public_key", vm_, __opts__, search_global=False, default=None
    )
    # No configured key: nothing to load.
    if filename is None:
        return None
    filename = os.path.expanduser(filename)
    if not os.path.isfile(filename):
        raise SaltCloudConfigError(
            "The defined ssh_public_key '{}' does not exist".format(filename)
        )
    with salt.utils.files.fopen(filename, "r") as handle:
        contents = handle.read()
    # Collapse the key to a single unicode line.
    return salt.utils.stringutils.to_unicode(contents.replace("\n", ""))
def get_public_keys(vm_):
    '''
    Retrieve list of SSH public keys.

    Returns one list entry per line of the configured ``ssh_public_key``
    file. Raises SaltCloudConfigError when the configured path does not
    exist.
    '''
    key_filename = config.get_cloud_config_value(
        'ssh_public_key', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is not None:
        key_filename = os.path.expanduser(key_filename)
        if not os.path.isfile(key_filename):
            raise SaltCloudConfigError(
                'The defined ssh_public_key \'{0}\' does not exist'.format(
                    key_filename
                )
            )
    ssh_keys = []
    # Context manager so the key file handle is always closed (the
    # previous bare open() leaked the file object).
    with open(key_filename) as keyfile:
        for key in keyfile.readlines():
            ssh_keys.append(key)
    return ssh_keys
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    '''
    Creates the disk for the Linode from the distribution.

    vm_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    '''
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    kwargs = {}
    if pub_key:
        kwargs['rootSSHKey'] = pub_key
    # A root password is mandatory for Linode disk creation.
    if not root_password:
        raise SaltCloudConfigError(
            'The Linode driver requires a password.'
        )
    kwargs['rootPass'] = root_password

    kwargs['LinodeID'] = linode_id
    kwargs['DistributionID'] = get_distribution_id(vm_)
    kwargs['Label'] = vm_['name']
    kwargs['Size'] = get_disk_size(vm_, swap_size)

    result = _query('linode', 'disk.createfromdistribution', args=kwargs)
    return _clean_data(result)
def create_disk_from_distro(vm_, linode_id, swap_size=None):
    r"""
    Creates the disk for the Linode from the distribution.

    vm\_
        The VM profile to create the disk for.

    linode_id
        The ID of the Linode to create the distribution disk for. Required.

    swap_size
        The size of the disk, in MB.
    """
    if swap_size is None:
        swap_size = get_swap_size(vm_)

    pub_key = get_pub_key(vm_)
    root_password = get_password(vm_)

    # The Linode API refuses disk creation without a root password.
    if not root_password:
        raise SaltCloudConfigError("The Linode driver requires a password.")

    kwargs = {}
    if pub_key:
        kwargs["rootSSHKey"] = pub_key
    kwargs["rootPass"] = root_password
    kwargs["LinodeID"] = linode_id
    kwargs["DistributionID"] = get_distribution_id(vm_)
    kwargs["Label"] = vm_["name"]
    kwargs["Size"] = get_disk_size(vm_, swap_size, linode_id)

    result = _query("linode", "disk.createfromdistribution", args=kwargs)

    return _clean_data(result)
def get_wwwroot(nodename=None):
    """
    A function to determine the wwwroot folder to use.

    Returns a string depending on the FQDN.

    CLI Example:

        salt * node.get_wwwroot
    """
    hostname = _get_property("hostname", nodename, None)
    if hostname is None:
        raise CommandExecutionError(
            SaltCloudConfigError(
                "Node {0} doesn't have a hostname property".format(nodename)))

    labels = hostname.split(".")
    # Short hostname (fewer than three labels): serve from a www subfolder.
    if len(labels) < 3:
        return "wwwroot/{0}/www".format(hostname)

    domain = ".".join(labels[-2:])
    subdomain = ".".join(labels[:-2])
    return "wwwroot/{1}/{0}".format(subdomain, domain)
def get_config(node_name=None):
    """
    A function to get relevant values for OpenSearch configuration.

    CLI Example:

        salt * opensearch.get_config
    """
    if node_name is None:
        node_name = __grains__["id"]

    try:
        clusters = __pillar__["opensearch_clusters"]
    except KeyError:
        # Fall back to an empty *mapping*: the loop below iterates with
        # .items(), so the previous list fallback ([]) raised
        # AttributeError whenever the pillar key was absent.
        clusters = {}

    for _, cluster in clusters.items():
        if node_name in cluster["nodes"]:
            return _expand_cluster_config(node_name, cluster)

    raise CommandExecutionError(
        SaltCloudConfigError(
            "Node {0} not declared in pillar opensearch_clusters.".format(
                node_name)))
def request_instance(vm_=None, call=None):
    '''
    Put together all of the information necessary to request an instance
    on Openstack and then fire off the request the instance.

    Returns data about the instance
    '''
    if call == 'function':
        # Technically this function may be called other ways too, but it
        # definitely cannot be called with --function.
        raise SaltCloudSystemExit(
            'The request_instance action must be called with -a or --action.')
    # Restrict VM names to characters the provider accepts.
    salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-')
    conn = get_conn()
    kwargs = {'name': vm_['name']}

    try:
        kwargs['image'] = get_image(conn, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit('Error creating {0} on OPENSTACK\n\n'
                                  'Could not find image {1}: {2}\n'.format(
                                      vm_['name'], vm_['image'], exc))

    try:
        kwargs['size'] = get_size(conn, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit('Error creating {0} on OPENSTACK\n\n'
                                  'Could not find size {1}: {2}\n'.format(
                                      vm_['name'], vm_['size'], exc))

    # Note: This currently requires libcloud trunk
    avz = config.get_cloud_config_value('availability_zone',
                                        vm_,
                                        __opts__,
                                        default=None,
                                        search_global=False)
    if avz is not None:
        kwargs['ex_availability_zone'] = avz

    kwargs['ex_keyname'] = config.get_cloud_config_value('ssh_key_name',
                                                         vm_,
                                                         __opts__,
                                                         search_global=False)

    security_groups = config.get_cloud_config_value('security_groups',
                                                    vm_,
                                                    __opts__,
                                                    search_global=False)
    if security_groups is not None:
        # Validate every requested group against the groups that actually
        # exist on the provider before handing them to libcloud.
        vm_groups = security_groups.split(',')
        avail_groups = conn.ex_list_security_groups()
        group_list = []

        for vmg in vm_groups:
            if vmg in [ag.name for ag in avail_groups]:
                group_list.append(vmg)
            else:
                raise SaltCloudNotFound(
                    'No such security group: \'{0}\''.format(vmg))

        kwargs['ex_security_groups'] = [
            g for g in avail_groups if g.name in group_list
        ]

    floating = _assign_floating_ips(vm_, conn, kwargs)
    vm_['floating'] = floating

    files = config.get_cloud_config_value('files',
                                          vm_,
                                          __opts__,
                                          search_global=False)
    if files:
        kwargs['ex_files'] = {}
        for src_path in files:
            with salt.utils.fopen(files[src_path], 'r') as fp_:
                kwargs['ex_files'][src_path] = fp_.read()

    userdata_file = config.get_cloud_config_value('userdata_file',
                                                  vm_,
                                                  __opts__,
                                                  search_global=False,
                                                  default=None)
    if userdata_file is not None:
        try:
            with salt.utils.fopen(userdata_file, 'r') as fp_:
                kwargs['ex_userdata'] = salt.utils.cloud.userdata_template(
                    __opts__, vm_, fp_.read())
        except Exception as exc:
            # Best effort: an unreadable userdata file is logged but does
            # not abort the instance request.
            log.exception('Failed to read userdata from %s: %s',
                          userdata_file, exc)

    config_drive = config.get_cloud_config_value('config_drive',
                                                 vm_,
                                                 __opts__,
                                                 default=None,
                                                 search_global=False)
    if config_drive is not None:
        kwargs['ex_config_drive'] = config_drive

    __utils__['cloud.fire_event']('event',
                                  'requesting instance',
                                  'salt/cloud/{0}/requesting'.format(
                                      vm_['name']),
                                  args={
                                      'kwargs': {
                                          'name': kwargs['name'],
                                          'image': kwargs['image'].name,
                                          'size': kwargs['size'].name,
                                          'profile': vm_['profile'],
                                      }
                                  },
                                  sock_dir=__opts__['sock_dir'],
                                  transport=__opts__['transport'])

    # Instance metadata defaults to the profile name when one is set.
    default_profile = {}
    if 'profile' in vm_ and vm_['profile'] is not None:
        default_profile = {'profile': vm_['profile']}

    kwargs['ex_metadata'] = config.get_cloud_config_value(
        'metadata',
        vm_,
        __opts__,
        default=default_profile,
        search_global=False)
    if not isinstance(kwargs['ex_metadata'], dict):
        raise SaltCloudConfigError('\'metadata\' should be a dict.')

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        raise SaltCloudSystemExit(
            'Error creating {0} on OpenStack\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(vm_['name'], exc))

    vm_['password'] = data.extra.get('password', None)
    return data, vm_
def create(vm_):
    '''
    Provision a single machine
    '''
    # Honor 'deploy: False' without touching the target host.
    if config.get_cloud_config_value('deploy', vm_, __opts__) is False:
        return {
            'Error': {
                'No Deploy': '\'deploy\' is not enabled. Not deploying.'
            }
        }

    key_filename = config.get_cloud_config_value('key_filename',
                                                 vm_,
                                                 __opts__,
                                                 search_global=False,
                                                 default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_keyfile {0!r} does not exist'.format(
                key_filename))

    ret = {}

    log.info('Provisioning existing machine {0}'.format(vm_['name']))

    ssh_username = config.get_cloud_config_value('ssh_username', vm_, __opts__)
    deploy_script = script(vm_)
    # Arguments handed to the deploy/bootstrap routine below.
    deploy_kwargs = {
        'opts': __opts__,
        'host': vm_['ssh_host'],
        'username': ssh_username,
        'script': deploy_script,
        'name': vm_['name'],
        'tmp_dir': config.get_cloud_config_value(
            'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
        'deploy_command': config.get_cloud_config_value(
            'deploy_command',
            vm_,
            __opts__,
            default='/tmp/.saltcloud/deploy.sh',
        ),
        'start_action': __opts__['start_action'],
        'parallel': __opts__['parallel'],
        'sock_dir': __opts__['sock_dir'],
        'conf_file': __opts__['conf_file'],
        'minion_pem': vm_['priv_key'],
        'minion_pub': vm_['pub_key'],
        'keep_tmp': __opts__['keep_tmp'],
        # Assume sudo is needed unless we are logging in as root.
        'sudo': config.get_cloud_config_value(
            'sudo', vm_, __opts__, default=(ssh_username != 'root')),
        'sudo_password': config.get_cloud_config_value(
            'sudo_password', vm_, __opts__, default=None),
        'tty': config.get_cloud_config_value(
            'tty', vm_, __opts__, default=True),
        'password': config.get_cloud_config_value(
            'password', vm_, __opts__, search_global=False),
        'key_filename': key_filename,
        'script_args': config.get_cloud_config_value(
            'script_args', vm_, __opts__),
        'script_env': config.get_cloud_config_value(
            'script_env', vm_, __opts__),
        'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_),
        'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
        'display_ssh_output': config.get_cloud_config_value(
            'display_ssh_output', vm_, __opts__, default=True)
    }
    if 'ssh_port' in vm_:
        deploy_kwargs.update({'port': vm_['ssh_port']})

    # forward any info about possible ssh gateway to deploy script
    # as some providers need also a 'gateway' configuration
    if 'gateway' in vm_:
        deploy_kwargs.update({'gateway': vm_['gateway']})

    # Deploy salt-master files, if necessary
    if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
        deploy_kwargs['make_master'] = True
        deploy_kwargs['master_pub'] = vm_['master_pub']
        deploy_kwargs['master_pem'] = vm_['master_pem']
        master_conf = salt.utils.cloud.master_config(__opts__, vm_)
        deploy_kwargs['master_conf'] = master_conf

        if master_conf.get('syndic_master', None):
            deploy_kwargs['make_syndic'] = True

    deploy_kwargs['make_minion'] = config.get_cloud_config_value(
        'make_minion', vm_, __opts__, default=True)

    win_installer = config.get_cloud_config_value('win_installer',
                                                  vm_,
                                                  __opts__)
    if win_installer:
        # Windows targets take the installer-based deployment path.
        deploy_kwargs['win_installer'] = win_installer
        minion = salt.utils.cloud.minion_config(__opts__, vm_)
        deploy_kwargs['master'] = minion['master']
        deploy_kwargs['username'] = config.get_cloud_config_value(
            'win_username', vm_, __opts__, default='Administrator')
        deploy_kwargs['password'] = config.get_cloud_config_value(
            'win_password', vm_, __opts__, default='')

    # Store what was used to the deploy the VM
    # (keys and passwords are stripped before the event is fired).
    event_kwargs = copy.deepcopy(deploy_kwargs)
    del event_kwargs['minion_pem']
    del event_kwargs['minion_pub']
    del event_kwargs['sudo_password']
    if 'password' in event_kwargs:
        del event_kwargs['password']
    ret['deploy_kwargs'] = event_kwargs

    salt.utils.cloud.fire_event('event',
                                'executing deploy script',
                                'salt/cloud/{0}/deploying'.format(vm_['name']),
                                {'kwargs': event_kwargs},
                                transport=__opts__['transport'])

    if win_installer:
        deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
    else:
        deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

    if deployed:
        ret['deployed'] = deployed
        log.info('Salt installed on {0}'.format(vm_['name']))
        return ret

    log.error('Failed to start Salt on host {0}'.format(vm_['name']))
    return {
        'Error': {
            'Not Deployed': 'Failed to start Salt on host {0}'.format(
                vm_['name'])
        }
    }
def create(vm_):
    """
    Provision a single machine
    """
    # Validate profile options before doing any libvirt work.
    clone_strategy = vm_.get("clone_strategy") or "full"

    if clone_strategy not in ("quick", "full"):
        raise SaltCloudSystemExit(
            "'clone_strategy' must be one of quick or full. Got '{}'".format(
                clone_strategy))

    ip_source = vm_.get("ip_source") or "ip-learning"

    if ip_source not in ("ip-learning", "qemu-agent"):
        raise SaltCloudSystemExit(
            "'ip_source' must be one of qemu-agent or ip-learning. Got '{}'".
            format(ip_source))

    validate_xml = (vm_.get("validate_xml")
                    if vm_.get("validate_xml") is not None else True)

    log.info(
        "Cloning '%s' with strategy '%s' validate_xml='%s'",
        vm_["name"],
        clone_strategy,
        validate_xml,
    )

    try:
        # Check for required profile parameters before sending any API calls.
        if (vm_["profile"] and config.is_profile_configured(
                __opts__,
                _get_active_provider_name() or "libvirt",
                vm_["profile"]) is False):
            return False
    except AttributeError:
        pass

    # TODO: check name qemu/libvirt will choke on some characters (like '/')?
    name = vm_["name"]

    __utils__["cloud.fire_event"](
        "event",
        "starting create",
        "salt/cloud/{}/creating".format(name),
        args=__utils__["cloud.filter_event"](
            "creating", vm_, ["name", "profile", "provider", "driver"]),
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )

    key_filename = config.get_cloud_config_value("private_key",
                                                 vm_,
                                                 __opts__,
                                                 search_global=False,
                                                 default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            "The defined key_filename '{}' does not exist".format(
                key_filename))
    vm_["key_filename"] = key_filename
    # wait_for_instance requires private_key
    vm_["private_key"] = key_filename

    # Resources created so far, so they can be destroyed on failure.
    cleanup = []
    try:
        # clone the vm
        base = vm_["base_domain"]
        conn = __get_conn(vm_["url"])

        try:
            # for idempotency the salt-bootstrap needs -F argument
            #   script_args: -F
            clone_domain = conn.lookupByName(name)
        except libvirtError as e:
            # The domain does not exist yet: clone it from the base domain.
            domain = conn.lookupByName(base)
            # TODO: ensure base is shut down before cloning
            xml = domain.XMLDesc(0)

            kwargs = {
                "name": name,
                "base_domain": base,
            }

            __utils__["cloud.fire_event"](
                "event",
                "requesting instance",
                "salt/cloud/{}/requesting".format(name),
                args={
                    "kwargs":
                    __utils__["cloud.filter_event"]("requesting", kwargs,
                                                    list(kwargs)),
                },
                sock_dir=__opts__["sock_dir"],
                transport=__opts__["transport"],
            )

            log.debug("Source machine XML '%s'", xml)

            domain_xml = ElementTree.fromstring(xml)
            domain_xml.find("./name").text = name
            if domain_xml.find("./description") is None:
                description_elem = ElementTree.Element("description")
                domain_xml.insert(0, description_elem)
            description = domain_xml.find("./description")
            description.text = "Cloned from {}".format(base)
            # Drop the UUID so libvirt generates a fresh one for the clone.
            domain_xml.remove(domain_xml.find("./uuid"))

            for iface_xml in domain_xml.findall("./devices/interface"):
                # Drop MACs so libvirt assigns new ones to the clone.
                iface_xml.remove(iface_xml.find("./mac"))
                # enable IP learning, this might be a default behaviour...
                # Don't always enable since it can cause problems through
                # libvirt-4.5
                if (ip_source == "ip-learning" and iface_xml.find(
                        "./filterref/parameter[@name='CTRL_IP_LEARNING']")
                        is None):
                    iface_xml.append(ElementTree.fromstring(IP_LEARNING_XML))

            # If a qemu agent is defined we need to fix the path to its socket
            # <channel type='unix'>
            #   <source mode='bind' path='/var/lib/libvirt/qemu/channel/target/domain-<dom-name>/org.qemu.guest_agent.0'/>
            #   <target type='virtio' name='org.qemu.guest_agent.0'/>
            #   <address type='virtio-serial' controller='0' bus='0' port='2'/>
            # </channel>
            for agent_xml in domain_xml.findall(
                    """./devices/channel[@type='unix']"""):
                # is org.qemu.guest_agent.0 an option?
                if (agent_xml.find(
                        """./target[@type='virtio'][@name='org.qemu.guest_agent.0']"""
                ) is not None):
                    source_element = agent_xml.find(
                        """./source[@mode='bind']""")
                    # see if there is a path element that needs rewriting
                    if source_element and "path" in source_element.attrib:
                        path = source_element.attrib["path"]
                        new_path = path.replace("/domain-{}/".format(base),
                                                "/domain-{}/".format(name))
                        log.debug("Rewriting agent socket path to %s",
                                  new_path)
                        source_element.attrib["path"] = new_path

            for disk in domain_xml.findall(
                    """./devices/disk[@device='disk'][@type='file']"""):
                # print "Disk: ", ElementTree.tostring(disk)
                # check if we can clone
                driver = disk.find("./driver[@name='qemu']")
                if driver is None:
                    # Err on the safe side
                    raise SaltCloudExecutionFailure(
                        "Non qemu driver disk encountered bailing out.")
                disk_type = driver.attrib.get("type")
                log.info("disk attributes %s", disk.attrib)
                if disk_type == "qcow2":
                    source = disk.find("./source").attrib["file"]
                    pool, volume = find_pool_and_volume(conn, source)
                    if clone_strategy == "quick":
                        # Quick clone: new volume backed by the original
                        # image (copy-on-write).
                        new_volume = pool.createXML(
                            create_volume_with_backing_store_xml(volume), 0)
                    else:
                        new_volume = pool.createXMLFrom(
                            create_volume_xml(volume), volume, 0)
                    cleanup.append({"what": "volume", "item": new_volume})
                    disk.find("./source").attrib["file"] = new_volume.path()
                elif disk_type == "raw":
                    source = disk.find("./source").attrib["file"]
                    pool, volume = find_pool_and_volume(conn, source)
                    # TODO: more control on the cloned disk type
                    new_volume = pool.createXMLFrom(
                        create_volume_xml(volume), volume, 0)
                    cleanup.append({"what": "volume", "item": new_volume})
                    disk.find("./source").attrib["file"] = new_volume.path()
                else:
                    raise SaltCloudExecutionFailure(
                        "Disk type '{}' not supported".format(disk_type))

            clone_xml = salt.utils.stringutils.to_str(
                ElementTree.tostring(domain_xml))
            log.debug("Clone XML '%s'", clone_xml)

            validate_flags = libvirt.VIR_DOMAIN_DEFINE_VALIDATE if validate_xml else 0
            clone_domain = conn.defineXMLFlags(clone_xml, validate_flags)

            cleanup.append({"what": "domain", "item": clone_domain})
            clone_domain.createWithFlags(libvirt.VIR_DOMAIN_START_FORCE_BOOT)

        log.debug("VM '%s'", vm_)

        # Map the configured ip_source onto the libvirt address-source flag.
        if ip_source == "qemu-agent":
            ip_source = libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT
        elif ip_source == "ip-learning":
            ip_source = libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE

        address = salt.utils.cloud.wait_for_ip(
            get_domain_ip,
            update_args=(clone_domain, 0, ip_source),
            timeout=config.get_cloud_config_value("wait_for_ip_timeout",
                                                  vm_,
                                                  __opts__,
                                                  default=10 * 60),
            interval=config.get_cloud_config_value("wait_for_ip_interval",
                                                   vm_,
                                                   __opts__,
                                                   default=10),
            interval_multiplier=config.get_cloud_config_value(
                "wait_for_ip_interval_multiplier", vm_, __opts__, default=1),
        )

        log.info("Address = %s", address)

        vm_["ssh_host"] = address

        # the bootstrap script needs to be installed first in
        # /etc/salt/cloud.deploy.d/ -- salt-cloud -u is your friend
        ret = __utils__["cloud.bootstrap"](vm_, __opts__)

        __utils__["cloud.fire_event"](
            "event",
            "created instance",
            "salt/cloud/{}/created".format(name),
            args=__utils__["cloud.filter_event"](
                "created", vm_, ["name", "profile", "provider", "driver"]),
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
        )

        return ret
    except Exception:  # pylint: disable=broad-except
        do_cleanup(cleanup)
        # throw the root cause after cleanup
        raise
def create(vm_):
    '''
    Create a single VM from a data dict
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'aws',
                vm_['profile']) is False:
            return False
    except AttributeError:
        pass

    key_filename = config.get_cloud_config_value('private_key',
                                                 vm_,
                                                 __opts__,
                                                 search_global=False,
                                                 default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename \'{0}\' does not exist'.format(
                key_filename))

    location = get_location(vm_)
    log.info('Creating Cloud VM {0} in {1}'.format(vm_['name'], location))
    conn = get_conn(location=location)
    usernames = ssh_username(vm_)

    kwargs = {
        'ssh_key': config.get_cloud_config_value('private_key',
                                                 vm_,
                                                 __opts__,
                                                 search_global=False),
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_),
        'location': get_availability_zone(conn, vm_)
    }

    ex_keyname = keyname(vm_)
    if ex_keyname:
        kwargs['ex_keyname'] = ex_keyname

    ex_securitygroup = securitygroup(vm_)
    if ex_securitygroup:
        kwargs['ex_securitygroup'] = ex_securitygroup

    ex_blockdevicemappings = block_device_mappings(vm_)
    if ex_blockdevicemappings:
        kwargs['ex_blockdevicemappings'] = ex_blockdevicemappings

    ex_iam_profile = iam_profile(vm_)
    if ex_iam_profile:
        # libcloud does not implement 'iam_profile' yet.
        # A pull request has been suggested
        # https://github.com/apache/libcloud/pull/150
        raise SaltCloudConfigError(
            'libcloud does not implement \'iam_profile\' yet. '
            'Use EC2 driver instead.')

    tags = config.get_cloud_config_value('tag',
                                         vm_,
                                         __opts__, {},
                                         search_global=False)
    if not isinstance(tags, dict):
        raise SaltCloudConfigError('\'tag\' should be a dict.')

    kwargs['ex_metadata'] = config.get_cloud_config_value(
        'metadata', vm_, __opts__, default={}, search_global=False)
    if not isinstance(kwargs['ex_metadata'], dict):
        raise SaltCloudConfigError('\'metadata\' should be a dict.')

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on AWS\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(vm_['name'], exc),
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG)
        return False

    log.info('Created node {0}'.format(vm_['name']))

    def __get_node_data(conn, vm_name):
        # Poll helper for wait_for_ip: return the node data once the
        # requested interface has an address, False to abort, or None
        # (implicitly) to keep waiting.
        data = get_node(conn, vm_name)
        if data is None:
            # Trigger a failure in the waiting function
            return False
        if ssh_interface(vm_) == 'private_ips' and data.private_ips:
            return data
        if ssh_interface(vm_) == 'public_ips' and data.public_ips:
            return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __get_node_data,
            update_args=(conn, vm_['name']),
            timeout=config.get_cloud_config_value('wait_for_ip_timeout',
                                                  vm_,
                                                  __opts__,
                                                  default=5 * 60),
            interval=config.get_cloud_config_value('wait_for_ip_interval',
                                                   vm_,
                                                   __opts__,
                                                   default=0.5),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    if tags:
        set_tags(vm_['name'], tags, call='action')

    # Choose the address used for the SSH deployment connection.
    if ssh_interface(vm_) == 'private_ips':
        log.info('Salt node data. Private_ip: {0}'.format(
            data.private_ips[0]))
        ip_address = data.private_ips[0]
    else:
        log.info('Salt node data. Public_ip: {0}'.format(
            data.public_ips[0]))
        ip_address = data.public_ips[0]

    # Choose the address the minion will use to reach the master.
    if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
        salt_ip_address = data.private_ips[0]
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    else:
        salt_ip_address = data.public_ips[0]
        log.debug('Salt interface set to: {0}'.format(salt_ip_address))

    # NOTE(review): '******' looks like a redacted placeholder for the
    # default username -- confirm the intended value.
    username = '******'
    ssh_connect_timeout = config.get_cloud_config_value(
        'ssh_connect_timeout',
        vm_,
        __opts__,
        900  # 15 minutes
    )
    ssh_port = config.get_cloud_config_value('ssh_port', vm_, __opts__, 22)
    if salt.utils.cloud.wait_for_port(ip_address,
                                      timeout=ssh_connect_timeout):
        # Try each candidate username until one authenticates.
        for user in usernames:
            if salt.utils.cloud.wait_for_passwd(
                    host=ip_address,
                    username=user,
                    ssh_timeout=config.get_cloud_config_value(
                        'wait_for_passwd_timeout',
                        vm_,
                        __opts__,
                        default=1 * 60),
                    key_filename=key_filename,
                    known_hosts_file=config.get_cloud_config_value(
                        'known_hosts_file',
                        vm_,
                        __opts__,
                        default='/dev/null'),
            ):
                username = user
                break
        else:
            raise SaltCloudSystemExit(
                'Failed to authenticate against remote ssh')

    ret = {}
    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        # Arguments handed to the deploy/bootstrap routine below.
        deploy_kwargs = {
            'opts': __opts__,
            'host': ip_address,
            'port': ssh_port,
            'salt_host': salt_ip_address,
            'username': username,
            'key_filename': key_filename,
            'tmp_dir': config.get_cloud_config_value(
                'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command',
                vm_,
                __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'tty': config.get_cloud_config_value(
                'tty', vm_, __opts__, default=True),
            'script': deploy_script.script,
            'name': vm_['name'],
            'sudo': config.get_cloud_config_value(
                'sudo', vm_, __opts__, default=(username != 'root')),
            'sudo_password': config.get_cloud_config_value(
                'sudo_password', vm_, __opts__, default=None),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'conf_file': __opts__['conf_file'],
            'sock_dir': __opts__['sock_dir'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__),
            'script_env': config.get_cloud_config_value(
                'script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_,
                                         __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Check for Windows install params
        win_installer = config.get_cloud_config_value('win_installer',
                                                      vm_,
                                                      __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator')
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default='')

        # Store what was used to the deploy the VM
        ret['deploy_kwargs'] = deploy_kwargs

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        if deployed:
            log.info('Salt installed on {name}'.format(**vm_))
        else:
            log.error('Failed to start Salt on Cloud VM {name}'.format(**vm_))

    # Merge the libcloud node attributes into the return payload.
    ret.update(data.__dict__)

    log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
    log.debug('\'{0[name]}\' VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data.__dict__)))

    volumes = config.get_cloud_config_value('volumes',
                                            vm_,
                                            __opts__,
                                            search_global=True)
    if volumes:
        log.info('Create and attach volumes to node {0}'.format(data.name))
        create_attach_volumes(volumes, location, data)

    return ret
def _get_server(vm_):
    '''
    Construct a ``Server`` object from the cloud profile config.

    Exactly one sizing scheme must be present in the profile:

    * ``fixed_instance_size`` - a predefined flavor, resolved via get_size()
    * ``vm_core``/``cores_per_processor``/``ram``/``hdds`` - a custom
      "cloud" server sizing
    * ``baremetal_model_id`` (with ``server_type: baremetal``)

    :param vm_: the cloud profile/VM config dict
    :return: a populated ``Server`` instance ready to submit to the API
    :raises SaltCloudConfigError: if none of the sizing schemes is configured
    '''
    description = config.get_cloud_config_value(
        'description', vm_, __opts__, default=None, search_global=False)

    # Load the SSH public key configured for this VM (passed as rsa_key).
    ssh_key = load_public_key(vm_)

    server_type = config.get_cloud_config_value(
        'server_type', vm_, __opts__, default='cloud', search_global=False)
    vcore = None
    cores_per_processor = None
    ram = None
    fixed_instance_size_id = None
    baremetal_model_id = None

    if 'fixed_instance_size' in vm_:
        # Predefined flavor: only the size id is needed.
        fixed_instance_size = get_size(vm_)
        fixed_instance_size_id = fixed_instance_size['id']
    elif 'vm_core' in vm_ and 'cores_per_processor' in vm_ and 'ram' in vm_ and 'hdds' in vm_:
        # Custom sizing for a 'cloud' server.
        # NOTE(review): the guard checks for a 'vm_core' key but the value
        # read below is 'vcore' -- looks inconsistent; confirm which key the
        # profiles actually use before changing either.
        vcore = config.get_cloud_config_value(
            'vcore', vm_, __opts__, default=None, search_global=False)
        cores_per_processor = config.get_cloud_config_value(
            'cores_per_processor', vm_, __opts__, default=None, search_global=False)
        ram = config.get_cloud_config_value(
            'ram', vm_, __opts__, default=None, search_global=False)
    elif 'baremetal_model_id' in vm_ and server_type == 'baremetal':
        baremetal_model_id = config.get_cloud_config_value(
            'baremetal_model_id', vm_, __opts__, default=None, search_global=False)
    else:
        # No recognizable sizing scheme in the profile.
        # (Fixed: added the missing space before "must be provided." so the
        # concatenated message is not garbled.)
        raise SaltCloudConfigError(
            "'fixed_instance_size' or 'vcore', "
            "'cores_per_processor', 'ram', and 'hdds' "
            "must be provided for 'cloud' server. "
            "For 'baremetal' server, 'baremetal_model_id' "
            "must be provided.")

    # Remaining optional, per-profile settings; all default to None/True.
    appliance_id = config.get_cloud_config_value('appliance_id', vm_, __opts__,
                                                 default=None, search_global=False)
    password = config.get_cloud_config_value('password', vm_, __opts__,
                                             default=None, search_global=False)
    firewall_policy_id = config.get_cloud_config_value('firewall_policy_id', vm_, __opts__,
                                                       default=None, search_global=False)
    ip_id = config.get_cloud_config_value('ip_id', vm_, __opts__,
                                          default=None, search_global=False)
    load_balancer_id = config.get_cloud_config_value('load_balancer_id', vm_, __opts__,
                                                     default=None, search_global=False)
    monitoring_policy_id = config.get_cloud_config_value('monitoring_policy_id', vm_, __opts__,
                                                         default=None, search_global=False)
    datacenter_id = config.get_cloud_config_value('datacenter_id', vm_, __opts__,
                                                  default=None, search_global=False)
    private_network_id = config.get_cloud_config_value('private_network_id', vm_, __opts__,
                                                       default=None, search_global=False)
    power_on = config.get_cloud_config_value('power_on', vm_, __opts__,
                                             default=True, search_global=False)
    public_key = config.get_cloud_config_value('public_key_ids', vm_, __opts__,
                                               default=None, search_global=False)

    # Construct server object
    return Server(
        name=vm_['name'],
        description=description,
        fixed_instance_size_id=fixed_instance_size_id,
        vcore=vcore,
        cores_per_processor=cores_per_processor,
        ram=ram,
        appliance_id=appliance_id,
        password=password,
        power_on=power_on,
        firewall_policy_id=firewall_policy_id,
        ip_id=ip_id,
        load_balancer_id=load_balancer_id,
        monitoring_policy_id=monitoring_policy_id,
        datacenter_id=datacenter_id,
        rsa_key=ssh_key,
        private_network_id=private_network_id,
        public_key=public_key,
        server_type=server_type,
        baremetal_model_id=baremetal_model_id)
def create(vm_):
    '''
    Create a single VM from a data dict.

    Fires lifecycle events on the salt event bus (creating -> requesting ->
    deploying -> created), requests the droplet, waits for a public IPv4
    address, and (when ``deploy`` is True) runs the salt bootstrap deploy
    script over SSH.

    :param vm_: the VM/profile config dict (must contain at least 'name',
        'profile', 'provider', and the key material used for deployment)
    :return: dict of creation results (includes 'deploy_kwargs' when a
        deployment was attempted), or False if the initial request failed
    :raises SaltCloudConfigError: on invalid ssh key / boolean option config
    :raises SaltCloudSystemExit: if no IP could be obtained in time
    '''
    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport']
    )
    log.info('Creating Cloud VM {0}'.format(vm_['name']))

    # Base request parameters for the provider API.
    kwargs = {
        'name': vm_['name'],
        'size': get_size(vm_),
        'image': get_image(vm_),
        'region': get_location(vm_),
    }
    kwargs['ssh_keys'] = []

    # backwards compat: single 'ssh_key_name' is still honored alongside the
    # newer comma-separated 'ssh_key_names'.
    ssh_key_name = config.get_cloud_config_value(
        'ssh_key_name', vm_, __opts__, search_global=False
    )
    if ssh_key_name:
        kwargs['ssh_keys'].append(get_keyid(ssh_key_name))
    ssh_key_names = config.get_cloud_config_value(
        'ssh_key_names', vm_, __opts__, search_global=False, default=False
    )
    if ssh_key_names:
        for key in ssh_key_names.split(','):
            kwargs['ssh_keys'].append(get_keyid(key))

    # The private key file is mandatory for this driver: no root password is
    # issued at build time, so SSH key auth is the only way in.
    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename {0!r} does not exist'.format(
                key_filename
            )
        )
    if key_filename is None:
        raise SaltCloudConfigError(
            'The DigitalOcean driver requires an ssh_key_file and an ssh_key_name '
            'because it does not supply a root password upon building the server.'
        )

    # Optional boolean features; each is validated before being forwarded.
    private_networking = config.get_cloud_config_value(
        'private_networking', vm_, __opts__, search_global=False, default=None,
    )
    if private_networking is not None:
        if not isinstance(private_networking, bool):
            raise SaltCloudConfigError("'private_networking' should be a boolean value.")
        kwargs['private_networking'] = private_networking

    backups_enabled = config.get_cloud_config_value(
        'backups_enabled', vm_, __opts__, search_global=False, default=None,
    )
    if backups_enabled is not None:
        if not isinstance(backups_enabled, bool):
            raise SaltCloudConfigError("'backups_enabled' should be a boolean value.")
        kwargs['backups'] = backups_enabled

    ipv6 = config.get_cloud_config_value(
        'ipv6', vm_, __opts__, search_global=False, default=None,
    )
    if ipv6 is not None:
        if not isinstance(ipv6, bool):
            raise SaltCloudConfigError("'ipv6' should be a boolean value.")
        kwargs['ipv6'] = ipv6

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {'kwargs': kwargs},
        transport=__opts__['transport']
    )

    try:
        ret = create_node(kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on DIGITAL_OCEAN\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: {1}'.format(
                vm_['name'],
                str(exc)
            ),
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False

    def __query_node_data(vm_name):
        # Poll helper for wait_for_ip: only a node with a public IPv4
        # interface counts as "up"; any falsy return keeps the wait going.
        data = show_instance(vm_name, 'action')
        if not data:
            # Trigger an error in the wait_for_ip function
            return False
        if data['networks'].get('v4'):
            for network in data['networks']['v4']:
                if network['type'] == 'public':
                    return data
        return False

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_['name'],),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root'
    )

    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)

        # Pick the public IPv4 address as the deploy target; if several
        # public interfaces exist, the last one in the list wins.
        for network in data['networks']['v4']:
            if network['type'] == 'public':
                ip_address = network['ip_address']

        create_record = config.get_cloud_config_value(
            'create_dns_record', vm_, __opts__, search_global=False, default=None,
        )
        if create_record is not None:
            if not isinstance(create_record, bool):
                raise SaltCloudConfigError("'create_dns_record' should be a boolean value.")
        if create_record:
            create_dns_record(vm_['name'], ip_address)

        # Everything the salt bootstrap needs: SSH target, credentials, key
        # material, and pass-through of the relevant master opts.
        deploy_kwargs = {
            'opts': __opts__,
            'host': ip_address,
            'username': ssh_username,
            'key_filename': key_filename,
            'script': deploy_script,
            'name': vm_['name'],
            'tmp_dir': config.get_cloud_config_value(
                'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'
            ),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True
            ),
            # sudo is only needed when we are not logging in as root.
            'sudo': config.get_cloud_config_value(
                'sudo', vm_, __opts__, default=(ssh_username != 'root')
            ),
            'sudo_password': config.get_cloud_config_value(
                'sudo_password', vm_, __opts__, default=None
            ),
            'tty': config.get_cloud_config_value(
                'tty', vm_, __opts__, default=False
            ),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__
            ),
            'script_env': config.get_cloud_config_value('script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True
        )

        # Check for Windows install params
        win_installer = config.get_cloud_config_value('win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator'
            )
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default=''
            )

        # Store what was used to the deploy the VM, with secrets stripped so
        # they never hit the event bus or the returned data.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event(
            'event',
            'executing deploy script',
            'salt/cloud/{0}/deploying'.format(vm_['name']),
            {'kwargs': event_kwargs},
            transport=__opts__['transport']
        )

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error(
                'Failed to start Salt on Cloud VM {0}'.format(
                    vm_['name']
                )
            )

    ret.update(data)

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug(
        '{0[name]!r} VM creation details:\n{1}'.format(
            vm_, pprint.pformat(data)
        )
    )

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
        transport=__opts__['transport']
    )

    return ret
def create(vm_):
    '''
    Create a single VM from a data dict.

    Resolves the Vultr image/size/location names to their numeric API ids,
    submits the ``server/create`` request, waits (via small polling
    closures) for the hostname, default password, active status and server
    state, then hands off to the generic cloud bootstrap.

    :param vm_: the VM/profile config dict ('name', 'image', 'size',
        'location', optionally 'driver'/'provider')
    :return: bootstrap result dict merged with the instance data, or False
        if any lookup or the create request failed
    '''
    if 'driver' not in vm_:
        # Older configs carry 'provider' instead of 'driver'.
        vm_['driver'] = vm_['provider']

    private_networking = config.get_cloud_config_value(
        'enable_private_network', vm_, __opts__, search_global=False, default=False,
    )
    if private_networking is not None:
        if not isinstance(private_networking, bool):
            raise SaltCloudConfigError("'private_networking' should be a boolean value.")
    # The Vultr API expects the literal strings 'yes'/'no', not booleans.
    if private_networking is True:
        enable_private_network = 'yes'
    else:
        enable_private_network = 'no'

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    # Translate human-friendly names into Vultr numeric ids; bail out early
    # (returning False) if any of them cannot be resolved.
    osid = _lookup_vultrid(vm_['image'], 'avail_images', 'OSID')
    if not osid:
        log.error('Vultr does not have an image with id or name {0}'.format(vm_['image']))
        return False

    vpsplanid = _lookup_vultrid(vm_['size'], 'avail_sizes', 'VPSPLANID')
    if not vpsplanid:
        log.error('Vultr does not have a size with id or name {0}'.format(vm_['size']))
        return False

    dcid = _lookup_vultrid(vm_['location'], 'avail_locations', 'DCID')
    if not dcid:
        log.error('Vultr does not have a location with id or name {0}'.format(vm_['location']))
        return False

    kwargs = {
        'label': vm_['name'],
        'OSID': osid,
        'VPSPLANID': vpsplanid,
        'DCID': dcid,
        'hostname': vm_['name'],
        'enable_private_network': enable_private_network,
    }
    log.info('Creating Cloud VM {0}'.format(vm_['name']))

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', kwargs, kwargs.keys()),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'],
    )

    try:
        # NOTE(review): urllib.urlencode is Python 2 only; Python 3 would
        # need urllib.parse.urlencode -- confirm the supported runtime.
        data = _query('server/create', method='POST', data=urllib.urlencode(kwargs))
        # The API reports errors in-band via a 'status' field; anything
        # >= 300 is treated as a failed request.
        if int(data.get('status', '200')) >= 300:
            log.error('Error creating {0} on Vultr\n\n'
                      'Vultr API returned {1}\n'.format(vm_['name'], data))
            log.error('Status 412 may mean that you are requesting an\n'
                      'invalid location, image, or size.')

            __utils__['cloud.fire_event'](
                'event',
                'instance request failed',
                'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
                args={'kwargs': kwargs},
                sock_dir=__opts__['sock_dir'],
                transport=__opts__['transport'],
            )
            return False
    except Exception as exc:
        log.error(
            'Error creating {0} on Vultr\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: \n{1}'.format(
                vm_['name'], str(exc)
            ),
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        __utils__['cloud.fire_event'](
            'event',
            'instance request failed',
            'salt/cloud/{0}/requesting/failed'.format(vm_['name']),
            args={'kwargs': kwargs},
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport'],
        )
        return False

    def wait_for_hostname():
        '''
        Wait for the IP address to become available
        '''
        # Polled by cloud.wait_for_fun: a falsy return means "keep waiting".
        data = show_instance(vm_['name'], call='action')
        # print("Waiting for hostname")
        # pprint.pprint(data)
        if str(data.get('main_ip', '0')) == '0':
            time.sleep(3)
            return False
        return data['main_ip']

    def wait_for_default_password():
        '''
        Wait for the IP address to become available
        '''
        # Polls until the API exposes the instance's default root password.
        data = show_instance(vm_['name'], call='action')
        # print("Waiting for default password")
        # pprint.pprint(data)
        if str(data.get('default_password', '')) == '':
            time.sleep(1)
            return False
        return data['default_password']

    def wait_for_status():
        '''
        Wait for the IP address to become available
        '''
        # Polls until the instance status becomes 'active'.
        # NOTE(review): returns default_password on success although only a
        # truthy value seems required here -- confirm against wait_for_fun.
        data = show_instance(vm_['name'], call='action')
        # print("Waiting for status normal")
        # pprint.pprint(data)
        if str(data.get('status', '')) != 'active':
            time.sleep(1)
            return False
        return data['default_password']

    def wait_for_server_state():
        '''
        Wait for the IP address to become available
        '''
        # Polls until the server_state reaches 'ok'.
        data = show_instance(vm_['name'], call='action')
        # print("Waiting for server state ok")
        # pprint.pprint(data)
        if str(data.get('server_state', '')) != 'ok':
            time.sleep(1)
            return False
        return data['default_password']

    # Each wait below blocks (up to wait_for_fun_timeout, default 15 min)
    # until the corresponding closure returns a truthy value.
    vm_['ssh_host'] = __utils__['cloud.wait_for_fun'](
        wait_for_hostname,
        timeout=config.get_cloud_config_value(
            'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
    )
    vm_['password'] = __utils__['cloud.wait_for_fun'](
        wait_for_default_password,
        timeout=config.get_cloud_config_value(
            'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
    )
    __utils__['cloud.wait_for_fun'](
        wait_for_status,
        timeout=config.get_cloud_config_value(
            'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
    )
    __utils__['cloud.wait_for_fun'](
        wait_for_server_state,
        timeout=config.get_cloud_config_value(
            'wait_for_fun_timeout', vm_, __opts__, default=15 * 60),
    )

    __opts__['hard_timeout'] = config.get_cloud_config_value(
        'hard_timeout',
        get_configured_provider(),
        __opts__,
        search_global=False,
        default=None,
    )

    # Bootstrap
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    ret.update(show_instance(vm_['name'], call='action'))

    log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
    log.debug(
        '\'{0[name]}\' VM creation details:\n{1}'.format(
            vm_, pprint.pformat(data)
        )
    )

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    return ret
def create(vm_):
    '''
    Create a single VM from a data dict.

    Either adopts an existing instance (when 'instance_id' is present,
    generating minion keys if needed) or requests a fresh one, then waits
    for the preferred SSH IP, resolves the salt interface address, and runs
    the generic cloud bootstrap.

    :param vm_: the VM/profile config dict
    :return: bootstrap result dict merged with the instance data
    :raises SaltCloudConfigError: if the configured ssh_key_file is missing
    :raises SaltCloudSystemExit: if no usable IP address is found in time
    '''
    # NOTE(review): 'deploy' is read but never used in this function --
    # presumably consumed downstream via __opts__/vm_; confirm.
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file \'{0}\' does not exist'.format(
                key_filename))

    vm_['key_filename'] = key_filename

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])
    conn = get_conn()

    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for \'%s\'', vm_['name'])
            vm_['priv_key'], vm_['pub_key'] = __utils__['cloud.gen_keys'](
                config.get_cloud_config_value('keysize', vm_, __opts__))
        data = show_instance(vm_['instance_id'], conn=conn, call='action')
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        data = request_instance(conn=conn, call='action', vm_=vm_)
    log.debug('VM is now running')

    def __query_node_ip(vm_):
        # Poll helper for wait_for_ip: re-reads the instance and returns the
        # preferred address on the configured SSH interface (closes over conn).
        data = show_instance(vm_['name'], conn=conn, call='action')
        return preferred_ip(vm_, data[ssh_interface(vm_)])

    try:
        ip_address = __utils__['cloud.wait_for_ip'](__query_node_ip, update_args=(vm_, ))
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))
    log.debug('Using IP address %s', ip_address)

    # The salt (master<->minion) interface may differ from the SSH one.
    salt_interface = __utils__['cloud.get_salt_interface'](vm_, __opts__)
    salt_ip_address = preferred_ip(vm_, data[salt_interface])
    log.debug('Salt interface set to: %s', salt_ip_address)

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    vm_['ssh_host'] = ip_address
    vm_['salt_host'] = salt_ip_address

    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)

    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug('\'%s\' VM creation details:\n%s',
              vm_['name'], pprint.pformat(data))

    # Only expose a whitelisted subset of the instance data on the event bus.
    event_data = {
        'name': vm_['name'],
        'profile': vm_['profile'],
        'provider': vm_['driver'],
        'instance_id': data['id'],
        'floating_ips': data['floating_ips'],
        'fixed_ips': data['fixed_ips'],
        'private_ips': data['private_ips'],
        'public_ips': data['public_ips'],
    }

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'created', event_data, list(event_data)),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])
    __utils__['cloud.cachedir_index_add'](vm_['name'], vm_['profile'], 'nova', vm_['driver'])
    return ret
def destroy(name, call=None):
    '''
    Destroy a node. Will check termination protection and warn if enabled.

    CLI Example:

    .. code-block:: bash

        salt-cloud --destroy mymachine
    '''
    # Destroying is an action (-d/-a), never a --function call.
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.')

    # Announce the impending teardown on the event bus.
    __utils__['cloud.fire_event'](
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    # Resolve the droplet by name, then delete it by numeric id.
    instance = show_instance(name, call='action')
    node = query(method='droplets', droplet_id=instance['id'], http_method='delete')

    # TODO: once per-VM config data is reachable here, honor a configurable
    # 'delete_dns_record' profile setting instead of hard-coding cleanup.
    # Until then, stale DNS records are assumed harmful, so removal is the
    # default behavior; the boolean guard below stays in place for when the
    # value becomes user-supplied again.
    delete_dns_record = True
    if not isinstance(delete_dns_record, bool):
        raise SaltCloudConfigError(
            '\'delete_dns_record\' should be a boolean value.')
    log.debug('Deleting DNS records for {0}.'.format(name))
    destroy_dns_records(name)

    # Announce completion on the event bus.
    __utils__['cloud.fire_event'](
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        args={'name': name},
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    # Keep the local minion cache in sync when caching is enabled.
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](
            name, __active_provider_name__.split(':')[0], __opts__)

    return node
def create(vm_):
    '''
    Create a single VM from a data dict.

    Validates profile/key configuration, optionally prepares DNS record
    creation (inferring hostname/domain from an FQDN minion name), requests
    the droplet, waits for a usable network interface, writes DNS A/AAAA
    records for public addresses when requested, and runs the generic cloud
    bootstrap.

    :param vm_: the VM/profile config dict
    :return: bootstrap result dict merged with the instance data, or False
        if profile validation or the create request failed
    :raises SaltCloudConfigError: on invalid key/interface/DNS/boolean config
    :raises SaltCloudSystemExit: if no suitable IP address is found
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'digital_ocean',
                vm_['profile'],
                vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    log.info('Creating Cloud VM {0}'.format(vm_['name']))

    kwargs = {
        'name': vm_['name'],
        'size': get_size(vm_),
        'image': get_image(vm_),
        'region': get_location(vm_),
        'ssh_keys': []
    }

    # backwards compat: single 'ssh_key_name' still honored alongside the
    # newer comma-separated 'ssh_key_names'.
    ssh_key_name = config.get_cloud_config_value(
        'ssh_key_name', vm_, __opts__, search_global=False)

    if ssh_key_name:
        kwargs['ssh_keys'].append(get_keyid(ssh_key_name))

    ssh_key_names = config.get_cloud_config_value(
        'ssh_key_names', vm_, __opts__, search_global=False, default=False)

    if ssh_key_names:
        for key in ssh_key_names.split(','):
            kwargs['ssh_keys'].append(get_keyid(key))

    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None)

    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename \'{0}\' does not exist'.format(
                key_filename))

    # Key auth is the only way in unless an ssh agent is available.
    if not __opts__.get('ssh_agent', False) and key_filename is None:
        raise SaltCloudConfigError(
            'The DigitalOcean driver requires an ssh_key_file and an ssh_key_name '
            'because it does not supply a root password upon building the server.'
        )

    ssh_interface = config.get_cloud_config_value(
        'ssh_interface', vm_, __opts__, search_global=False, default='public')

    if ssh_interface in ['private', 'public']:
        log.info("ssh_interface: Setting interface for ssh to {}".format(
            ssh_interface))
        kwargs['ssh_interface'] = ssh_interface
    else:
        raise SaltCloudConfigError(
            "The DigitalOcean driver requires ssh_interface to be defined as 'public' or 'private'."
        )

    private_networking = config.get_cloud_config_value(
        'private_networking', vm_, __opts__, search_global=False, default=None,
    )

    if private_networking is not None:
        if not isinstance(private_networking, bool):
            raise SaltCloudConfigError(
                "'private_networking' should be a boolean value.")
        kwargs['private_networking'] = private_networking

    # SSHing over the private interface only works with private networking on.
    if not private_networking and ssh_interface == 'private':
        raise SaltCloudConfigError(
            "The DigitalOcean driver requires ssh_interface if defined as 'private' "
            "then private_networking should be set as 'True'.")

    backups_enabled = config.get_cloud_config_value(
        'backups_enabled', vm_, __opts__, search_global=False, default=None,
    )

    if backups_enabled is not None:
        if not isinstance(backups_enabled, bool):
            raise SaltCloudConfigError(
                "'backups_enabled' should be a boolean value.")
        kwargs['backups'] = backups_enabled

    ipv6 = config.get_cloud_config_value(
        'ipv6', vm_, __opts__, search_global=False, default=None,
    )

    if ipv6 is not None:
        if not isinstance(ipv6, bool):
            raise SaltCloudConfigError("'ipv6' should be a boolean value.")
        kwargs['ipv6'] = ipv6

    # Optional cloud-init style userdata, rendered through the template engine.
    userdata_file = config.get_cloud_config_value(
        'userdata_file', vm_, __opts__, search_global=False, default=None)
    if userdata_file is not None:
        try:
            with salt.utils.files.fopen(userdata_file, 'r') as fp_:
                kwargs['user_data'] = salt.utils.cloud.userdata_template(
                    __opts__, vm_, fp_.read())
        except Exception as exc:
            # Best-effort: a bad userdata file is logged, not fatal.
            log.exception(
                'Failed to read userdata from %s: %s', userdata_file, exc)

    create_dns_record = config.get_cloud_config_value(
        'create_dns_record', vm_, __opts__, search_global=False, default=None,
    )

    if create_dns_record:
        log.info('create_dns_record: will attempt to write DNS records')
        default_dns_domain = None
        dns_domain_name = vm_['name'].split('.')
        if len(dns_domain_name) > 2:
            # A dotted minion name is treated as an FQDN: the last two labels
            # become the domain, the rest become the hostname.
            log.debug(
                'create_dns_record: inferring default dns_hostname, dns_domain from minion name as FQDN'
            )
            default_dns_hostname = '.'.join(dns_domain_name[:-2])
            default_dns_domain = '.'.join(dns_domain_name[-2:])
        else:
            log.debug(
                "create_dns_record: can't infer dns_domain from {0}".format(
                    vm_['name']))
            default_dns_hostname = dns_domain_name[0]

        dns_hostname = config.get_cloud_config_value(
            'dns_hostname', vm_, __opts__, search_global=False,
            default=default_dns_hostname,
        )
        dns_domain = config.get_cloud_config_value(
            'dns_domain', vm_, __opts__, search_global=False,
            default=default_dns_domain,
        )
        if dns_hostname and dns_domain:
            log.info(
                'create_dns_record: using dns_hostname="{0}", dns_domain="{1}"'
                .format(dns_hostname, dns_domain))
            # Bound here so the network-scan loop below can add A/AAAA
            # records without re-threading hostname/domain through it.
            __add_dns_addr__ = lambda t, d: post_dns_record(
                dns_domain=dns_domain, name=dns_hostname,
                record_type=t, record_data=d)

            log.debug('create_dns_record: {0}'.format(__add_dns_addr__))
        else:
            log.error(
                'create_dns_record: could not determine dns_hostname and/or dns_domain'
            )
            raise SaltCloudConfigError(
                '\'create_dns_record\' must be a dict specifying "domain" '
                'and "hostname" or the minion name must be an FQDN.')

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args=__utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    try:
        ret = create_node(kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on DIGITAL_OCEAN\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: {1}'.format(vm_['name'], str(exc)),
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG)
        return False

    def __query_node_data(vm_name):
        # Poll helper for wait_for_ip: only a node with a public IPv4
        # interface counts as up; falsy returns keep the wait going.
        data = show_instance(vm_name, 'action')
        if not data:
            # Trigger an error in the wait_for_ip function
            return False
        if data['networks'].get('v4'):
            for network in data['networks']['v4']:
                if network['type'] == 'public':
                    return data
        return False

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_['name'], ),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    if not vm_.get('ssh_host'):
        vm_['ssh_host'] = None

    # add DNS records, set ssh_host, default to first found IP, preferring IPv4 for ssh bootstrap script target
    addr_families, dns_arec_types = (('v4', 'v6'), ('A', 'AAAA'))
    arec_map = dict(list(zip(addr_families, dns_arec_types)))
    for facing, addr_family, ip_address in [
            (net['type'], family, net['ip_address'])
            for family in addr_families
            for net in data['networks'][family]]:
        log.info('found {0} IP{1} interface for "{2}"'.format(
            facing, addr_family, ip_address))
        dns_rec_type = arec_map[addr_family]
        if facing == 'public':
            if create_dns_record:
                __add_dns_addr__(dns_rec_type, ip_address)
        if facing == ssh_interface:
            # First matching address wins as the bootstrap target.
            if not vm_['ssh_host']:
                vm_['ssh_host'] = ip_address

    if vm_['ssh_host'] is None:
        raise SaltCloudSystemExit(
            'No suitable IP addresses found for ssh minion bootstrapping: {0}'.
            format(repr(data['networks'])))

    log.debug(
        'Found public IP address to use for ssh minion bootstrapping: {0}'.
        format(vm_['ssh_host']))

    vm_['key_filename'] = key_filename
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data)

    log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
    log.debug('\'{0[name]}\' VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    return ret
def create(vm_):
    '''
    Create a single VM from a data dict.

    Instantiates an OpenNebula template over XML-RPC (optionally pinned to a
    region via SCHED_REQUIREMENTS), waits for the VM to reach the RUNNING
    lcm_state, and (when ``deploy`` is True) runs the salt deploy script
    over SSH against the first private IP.

    :param vm_: the VM/profile config dict
    :return: dict of creation results, or False if the instantiate call failed
    :raises SaltCloudConfigError: if the configured ssh_key_file is missing
    :raises SaltCloudSystemExit: if the VM never became reachable in time
    '''
    salt.utils.cloud.fire_event(
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    kwargs = {
        'name': vm_['name'],
        'image_id': get_image(vm_),
        'region_id': get_location(vm_),
    }

    key_filename = config.get_cloud_config_value(
        'ssh_key_file', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename {0!r} does not exist'.format(
                key_filename
            )
        )

    private_networking = config.get_cloud_config_value(
        'private_networking', vm_, __opts__, search_global=False, default=None
    )
    # OpenNebula expects the literal strings 'true'/'false'.
    kwargs['private_networking'] = 'true' if private_networking else 'false'

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {'kwargs': kwargs},
    )

    # Pin scheduling to the requested host/cluster id, if one was resolved.
    region = ''
    if kwargs['region_id'] is not None:
        region = 'SCHED_REQUIREMENTS="ID={0}"'.format(kwargs['region_id'])
    try:
        server, user, password = _get_xml_rpc()
        # one.template.instantiate returns [success, vm_id, error_code];
        # index [1] is the new VM id.
        ret = server.one.template.instantiate(user+':'+password,
                                              int(kwargs['image_id']),
                                              kwargs['name'],
                                              False,
                                              region)[1]
    except Exception as exc:
        log.error(
            'Error creating {0} on OpenNebula\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: {1}'.format(
                vm_['name'],
                str(exc)
            ),
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False

    def __query_node_data(vm_name):
        # Poll helper for wait_for_ip. state '7' is FAILED (gives up);
        # lcm_state '3' is RUNNING (done); any other combination implicitly
        # returns None, which keeps the wait going.
        data = show_instance(vm_name, call='action')
        if not data:
            # Trigger an error in the wait_for_ip function
            return False
        if data['state'] == '7':
            return False
        if data['lcm_state'] == '3':
            return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_['name'],),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=2),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root'
    )

    # NOTE(review): when deploy is False, 'ret' below is still the integer
    # VM id from instantiate, and ret.update(data) would fail; when deploy
    # is True it is rebound to a dict first. Confirm the non-deploy path.
    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        deploy_script = script(vm_)
        # Everything the salt bootstrap needs: SSH target (first private
        # IP), credentials, key material, and relevant master opts.
        deploy_kwargs = {
            'host': data['private_ips'][0],
            'username': ssh_username,
            'key_filename': key_filename,
            'script': deploy_script,
            'name': vm_['name'],
            'tmp_dir': config.get_cloud_config_value(
                'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'
            ),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True
            ),
            # sudo is only needed when we are not logging in as root.
            'sudo': config.get_cloud_config_value(
                'sudo', vm_, __opts__, default=(ssh_username != 'root')
            ),
            'sudo_password': config.get_cloud_config_value(
                'sudo_password', vm_, __opts__, default=None
            ),
            'tty': config.get_cloud_config_value(
                'tty', vm_, __opts__, default=False
            ),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__
            ),
            'script_env': config.get_cloud_config_value('script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True
        )

        # Check for Windows install params
        win_installer = config.get_cloud_config_value('win_installer', vm_, __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator'
            )
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default=''
            )

        # Store what was used to the deploy the VM, with secrets stripped so
        # they never hit the event bus or the returned data.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret = {}
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event(
            'event',
            'executing deploy script',
            'salt/cloud/{0}/deploying'.format(vm_['name']),
            {'kwargs': event_kwargs},
        )

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error(
                'Failed to start Salt on Cloud VM {0}'.format(
                    vm_['name']
                )
            )

    ret.update(data)

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug(
        '{0[name]!r} VM creation details:\n{1}'.format(
            vm_, pprint.pformat(data)
        )
    )

    salt.utils.cloud.fire_event(
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        {
            'name': vm_['name'],
            'profile': vm_['profile'],
            'provider': vm_['provider'],
        },
    )

    return ret
def create(vm_):
    '''
    Create a single VM from a data dict.

    vm_
        The VM/profile configuration dict. Must contain ``name`` and
        ``profile``; may contain ``instance_id`` when the instance was
        already created by another process (in which case only the salt
        keys and password are set up here).

    Returns the bootstrap result dict merged with the node's attributes,
    or ``False`` when the profile is not correctly configured.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'nova',
                vm_['profile'],
                vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # NOTE(review): 'deploy' is read but never used below — presumably
    # deployment moved into cloud.bootstrap; confirm before removing.
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value('ssh_key_file', vm_, __opts__,
                                                 search_global=False,
                                                 default=None)
    # Fail early on a misconfigured key path, before any API calls.
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file \'{0}\' does not exist'.format(
                key_filename))

    vm_['key_filename'] = key_filename

    # Announce the creation on the salt event bus.
    __utils__['cloud.fire_event'](
        'event', 'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])
    conn = get_conn()

    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for \'{0[name]}\''.format(vm_))
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value('keysize', vm_, __opts__))
        data = conn.server_show_libcloud(vm_['instance_id'])
        # No SSH key available: set a fresh root password if the master
        # config asks for it ('change_password').
        # NOTE(review): 'sup' is presumably an alias for
        # salt.utils.pycrypto imported at the top of the file — confirm.
        if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__['change_password'] is True:
            vm_['password'] = sup.secure_password()
            conn.root_password(vm_['instance_id'], vm_['password'])
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        data, vm_ = request_instance(vm_)

        # Pull the instance ID, valid for both spot and normal instances
        vm_['instance_id'] = data.id

    try:
        # Poll until the node reports a usable IP address (or time out).
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(vm_, data, conn),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            # Always re-raise as a system exit, whether destroy worked or not.
            raise SaltCloudSystemExit(str(exc))

    log.debug('VM is now running')

    # Pick the address used for the SSH/bootstrap connection according to
    # the configured interface type.
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif ssh_interface(vm_) == 'fixed_ips':
        ip_address = preferred_ip(vm_, data.fixed_ips)
    elif ssh_interface(vm_) == 'floating_ips':
        ip_address = preferred_ip(vm_, data.floating_ips)
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    # Pick the address the minion will use to reach the salt master;
    # it may differ from the SSH address.
    if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'fixed_ips':
        salt_ip_address = preferred_ip(vm_, data.fixed_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    elif salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'floating_ips':
        salt_ip_address = preferred_ip(vm_, data.floating_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: {0}'.format(salt_ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    vm_['ssh_host'] = ip_address
    vm_['salt_host'] = salt_ip_address

    # Install salt on the new node and merge the node's attributes into
    # the returned result.
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)
    ret.update(data.__dict__)

    # Never leak the generated root password in the returned data.
    if 'password' in ret['extra']:
        del ret['extra']['password']

    log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
    log.debug('\'{0[name]}\' VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data.__dict__)))

    event_data = {
        'name': vm_['name'],
        'profile': vm_['profile'],
        'provider': vm_['driver'],
        'instance_id': vm_['instance_id'],
        'floating_ips': data.floating_ips,
        'fixed_ips': data.fixed_ips,
        'private_ips': data.private_ips,
        'public_ips': data.public_ips
    }

    # Fire the 'created' event and record the VM in the cachedir index.
    __utils__['cloud.fire_event'](
        'event', 'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'created', event_data, list(event_data)),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])
    __utils__['cloud.cachedir_index_add'](
        vm_['name'], vm_['profile'], 'nova', vm_['driver'])
    return ret
def create(vm_):
    '''
    Provision a single machine by cloning an existing libvirt base domain.

    vm_
        The VM/profile configuration dict. Must contain ``name``,
        ``base_domain`` and ``url``; optional keys are
        ``clone_strategy`` ('quick' or 'full', default 'full'),
        ``ip_source`` ('ip-learning' or 'qemu-agent', default
        'ip-learning') and ``validate_xml`` (default True).

    Returns the bootstrap result dict, or ``False`` when the profile is
    not correctly configured.

    Raises ``SaltCloudSystemExit`` on invalid option values,
    ``SaltCloudConfigError`` on a missing key file and
    ``SaltCloudExecutionFailure`` on unsupported disk configurations.
    On any failure, resources created so far (volumes, the cloned
    domain) are cleaned up before the exception propagates.
    '''
    clone_strategy = vm_.get('clone_strategy') or 'full'

    if clone_strategy not in set(['quick', 'full']):
        raise SaltCloudSystemExit("'clone_strategy' must be one of quick or full. Got '{0}'".format(clone_strategy))

    ip_source = vm_.get('ip_source') or 'ip-learning'

    if ip_source not in set(['ip-learning', 'qemu-agent']):
        raise SaltCloudSystemExit("'ip_source' must be one of qemu-agent or ip-learning. Got '{0}'".format(ip_source))

    # An explicit False must be honoured, so only fall back to True when
    # the key is absent (plain `or True` would discard False).
    validate_xml = vm_.get('validate_xml') if vm_.get('validate_xml') is not None else True

    log.info("Cloning machine '{0}' with strategy '{1}' validate_xml='{2}'".format(vm_['name'], clone_strategy, validate_xml))

    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'libvirt',
                                                           vm_['profile']) is False:
            return False
    except AttributeError:
        pass

    # TODO: check name qemu/libvirt will choke on some characters (like '/')?
    name = vm_['name']

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    key_filename = config.get_cloud_config_value(
        'private_key', vm_, __opts__, search_global=False, default=None
    )
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename \'{0}\' does not exist'.format(
                key_filename
            )
        )
    vm_['key_filename'] = key_filename
    # wait_for_instance requires private_key
    vm_['private_key'] = key_filename

    # Everything created below is registered here so the except-block can
    # roll it back on failure.
    cleanup = []
    try:
        # clone the vm
        base = vm_['base_domain']
        conn = __get_conn(vm_['url'])

        try:
            # If the domain already exists we reuse it (idempotency); the
            # salt-bootstrap then needs the -F argument:
            #   script_args: -F
            clone_domain = conn.lookupByName(name)
        except libvirtError:
            # Domain does not exist yet: clone it from the base domain's XML.
            domain = conn.lookupByName(base)
            # TODO: ensure base is shut down before cloning
            xml = domain.XMLDesc(0)

            kwargs = {
                'name': name,
                'base_domain': base,
            }

            __utils__['cloud.fire_event'](
                'event',
                'requesting instance',
                'salt/cloud/{0}/requesting'.format(name),
                args={
                    'kwargs': __utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)),
                },
                sock_dir=__opts__['sock_dir'],
                transport=__opts__['transport']
            )

            log.debug("Source machine XML '{0}'".format(xml))

            domain_xml = ElementTree.fromstring(xml)
            domain_xml.find('./name').text = name
            if domain_xml.find('./description') is None:
                description_elem = ElementTree.Element('description')
                domain_xml.insert(0, description_elem)
            description = domain_xml.find('./description')
            description.text = "Cloned from {0}".format(base)
            # Drop the UUID so libvirt assigns a fresh one to the clone.
            domain_xml.remove(domain_xml.find('./uuid'))

            for iface_xml in domain_xml.findall('./devices/interface'):
                # Drop the MAC so the clone gets its own address.
                iface_xml.remove(iface_xml.find('./mac'))
                # enable IP learning, this might be a default behaviour...
                if iface_xml.find("./filterref/parameter[@name='CTRL_IP_LEARNING']") is None:
                    iface_xml.append(ElementTree.fromstring(IP_LEARNING_XML))

            # If a qemu agent is defined we need to fix the path to its socket
            # <channel type='unix'>
            #   <source mode='bind' path='/var/lib/libvirt/qemu/channel/target/domain-<dom-name>/org.qemu.guest_agent.0'/>
            #   <target type='virtio' name='org.qemu.guest_agent.0'/>
            #   <address type='virtio-serial' controller='0' bus='0' port='2'/>
            # </channel>
            for agent_xml in domain_xml.findall("""./devices/channel[@type='unix']"""):
                # is org.qemu.guest_agent.0 an option?
                if agent_xml.find("""./target[@type='virtio'][@name='org.qemu.guest_agent.0']""") is not None:
                    source_element = agent_xml.find("""./source[@mode='bind']""")
                    # see if there is a path element that needs rewriting
                    # BUGFIX: compare against None explicitly — an Element
                    # with no children is falsy, so a plain truth test
                    # skipped the rewrite even when <source> was present.
                    if source_element is not None and 'path' in source_element.attrib:
                        path = source_element.attrib['path']
                        new_path = path.replace('/domain-{0}/'.format(base), '/domain-{0}/'.format(name))
                        log.debug("Rewriting agent socket path to {0}".format(new_path))
                        source_element.attrib['path'] = new_path

            for disk in domain_xml.findall("""./devices/disk[@device='disk'][@type='file']"""):
                # check if we can clone
                driver = disk.find("./driver[@name='qemu']")
                if driver is None:
                    # Err on the safe side
                    raise SaltCloudExecutionFailure("Non qemu driver disk encountered bailing out.")
                disk_type = driver.attrib.get('type')
                log.info("disk attributes {0}".format(disk.attrib))
                if disk_type == 'qcow2':
                    source = disk.find("./source").attrib['file']
                    pool, volume = find_pool_and_volume(conn, source)
                    if clone_strategy == 'quick':
                        # Thin clone: new volume backed by the base image.
                        new_volume = pool.createXML(create_volume_with_backing_store_xml(volume), 0)
                    else:
                        # Full clone: copy the base volume's contents.
                        new_volume = pool.createXMLFrom(create_volume_xml(volume), volume, 0)
                    cleanup.append({'what': 'volume', 'item': new_volume})

                    disk.find("./source").attrib['file'] = new_volume.path()
                elif disk_type == 'raw':
                    source = disk.find("./source").attrib['file']
                    pool, volume = find_pool_and_volume(conn, source)
                    # TODO: more control on the cloned disk type
                    new_volume = pool.createXMLFrom(create_volume_xml(volume), volume, 0)
                    cleanup.append({'what': 'volume', 'item': new_volume})

                    disk.find("./source").attrib['file'] = new_volume.path()
                else:
                    raise SaltCloudExecutionFailure("Disk type '{0}' not supported".format(disk_type))

            clone_xml = ElementTree.tostring(domain_xml)
            log.debug("Clone XML '{0}'".format(clone_xml))

            validate_flags = libvirt.VIR_DOMAIN_DEFINE_VALIDATE if validate_xml else 0
            clone_domain = conn.defineXMLFlags(clone_xml, validate_flags)

            cleanup.append({'what': 'domain', 'item': clone_domain})
            clone_domain.createWithFlags(libvirt.VIR_DOMAIN_START_FORCE_BOOT)

        log.debug("VM '{0}'".format(vm_))

        # Map the textual ip_source option onto the libvirt API constant.
        if ip_source == 'qemu-agent':
            ip_source = libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT
        elif ip_source == 'ip-learning':
            ip_source = libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE

        address = salt.utils.cloud.wait_for_ip(
            get_domain_ip,
            update_args=(clone_domain, 0, ip_source),
            timeout=config.get_cloud_config_value('wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value('wait_for_ip_interval', vm_, __opts__, default=10),
            interval_multiplier=config.get_cloud_config_value('wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
        )

        log.info('Address = {0}'.format(address))

        vm_['ssh_host'] = address

        # the bootstrap script needs to be installed first in /etc/salt/cloud.deploy.d/
        # salt-cloud -u is your friend
        ret = __utils__['cloud.bootstrap'](vm_, __opts__)

        __utils__['cloud.fire_event'](
            'event',
            'created instance',
            'salt/cloud/{0}/created'.format(name),
            args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']),
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )

        return ret
    except Exception:  # pylint: disable=broad-except
        # Try to clean up in as much cases as possible
        log.info('Cleaning up after exception clean up items: {0}'.format(cleanup))
        for leftover in cleanup:
            what = leftover['what']
            item = leftover['item']
            if what == 'domain':
                destroy_domain(conn, item)
            if what == 'volume':
                item.delete()
        # Bare raise preserves the original traceback ('raise e' re-raised
        # from here and obscured where the failure actually happened).
        raise