def query(params=None):
    '''
    Make a signed GET request to the QingCloud IaaS API.

    params
        Optional dict of action/function parameters. List values are
        flattened into the dotted ``key.N`` / ``key.N.subkey`` form the
        API expects; nested dicts/lists inside list items are
        JSON-encoded.

    Returns the decoded JSON response dict.

    Raises SaltCloudSystemExit on a non-200 HTTP status or a non-zero
    ``ret_code`` in the response body.
    '''
    path = 'https://api.qingcloud.com/iaas/'

    access_key_id = config.get_cloud_config_value(
        'access_key_id', get_configured_provider(), __opts__,
        search_global=False)
    access_key_secret = config.get_cloud_config_value(
        'secret_access_key', get_configured_provider(), __opts__,
        search_global=False)

    # public interface parameters
    real_parameters = {
        'access_key_id': access_key_id,
        'signature_version': DEFAULT_QINGCLOUD_SIGNATURE_VERSION,
        'time_stamp': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
        'version': DEFAULT_QINGCLOUD_API_VERSION,
    }

    # include action or function parameters, flattening lists into the
    # 1-based dotted form (key.1, key.1.subkey, ...)
    if params:
        for key, value in params.items():
            if isinstance(value, list):
                for i, item in enumerate(value, 1):
                    if isinstance(item, dict):
                        for sk, sv in item.items():
                            if isinstance(sv, (dict, list)):
                                sv = salt.utils.json.dumps(
                                    sv, separators=(',', ':'))
                            real_parameters['{0}.{1}.{2}'.format(
                                key, i, sk)] = sv
                    else:
                        real_parameters['{0}.{1}'.format(key, i)] = item
            else:
                real_parameters[key] = value

    # Calculate the string for Signature
    signature = _compute_signature(real_parameters, access_key_secret,
                                   'GET', '/iaas/')
    real_parameters['signature'] = signature

    # NOTE(review): TLS certificate verification is disabled here;
    # consider making this configurable instead of hard-coding
    # verify=False.
    request = requests.get(path, params=real_parameters, verify=False)

    if request.status_code != 200:
        raise SaltCloudSystemExit(
            'An error occurred while querying QingCloud. HTTP Code: {0} '
            'Error: \'{1}\''.format(request.status_code, request.text))

    log.debug(request.url)

    content = request.text
    result = salt.utils.json.loads(
        content, object_hook=salt.utils.data.encode_dict)

    if result['ret_code'] != 0:
        raise SaltCloudSystemExit(pprint.pformat(result.get('message', {})))

    return result
def destroy(name, call=None):
    """
    This function irreversibly destroys a virtual machine on the cloud
    provider. Before doing so, it should fire an event on the Salt event
    bus.

    The tag for this event is `salt/cloud/<vm name>/destroying`.
    Once the virtual machine has been destroyed, another event is fired.
    The tag for that event is `salt/cloud/<vm name>/destroyed`.

    Dependencies:
        list_nodes

    @param name:
    @type name: str
    @param call:
    @type call:
    @return: True if all went well, otherwise an error message
    @rtype: bool|str
    """
    # Lazy %-formatting: the message is only rendered if INFO is enabled
    # (the old code eagerly called .format() before passing it in).
    log.info("Attempting to delete instance %s", name)

    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.')

    found = []

    # Look the domain up in every configured libvirt provider; a name
    # matching more than one hypervisor is ambiguous and left alone.
    providers = __opts__.get('providers', {})
    providers_to_check = [
        _f for _f in [cfg.get('libvirt') for cfg in six.itervalues(providers)]
        if _f
    ]
    for provider in providers_to_check:
        conn = __get_conn(provider['url'])
        log.info("looking at %s", provider['url'])
        try:
            domain = conn.lookupByName(name)
            found.append({'domain': domain, 'conn': conn})
        except libvirtError:
            pass

    if not found:
        return "{0} doesn't exist and can't be deleted".format(name)

    if len(found) > 1:
        return "{0} doesn't identify a unique machine leaving things".format(
            name)

    __utils__['cloud.fire_event']('event', 'destroying instance',
                                  'salt/cloud/{0}/destroying'.format(name),
                                  args={
                                      'name': name
                                  },
                                  sock_dir=__opts__['sock_dir'],
                                  transport=__opts__['transport'])

    destroy_domain(found[0]['conn'], found[0]['domain'])

    __utils__['cloud.fire_event']('event', 'destroyed instance',
                                  'salt/cloud/{0}/destroyed'.format(name),
                                  args={
                                      'name': name
                                  },
                                  sock_dir=__opts__['sock_dir'],
                                  transport=__opts__['transport'])
def create(vm_):
    """
    Create a single VM from a data dict

    vm_
        Profile/VM definition dict. Reads ``name``, ``profile``,
        ``location``, ``image``, ``network_domain``, ``vlan``,
        ``description``, ``is_started`` and ``auth``.

    Returns the bootstrap result dict merged with the node's attributes,
    or ``False`` when the profile check or the initial create call fails.
    """
    try:
        # Check for required profile parameters before sending any API calls.
        if (vm_["profile"] and config.is_profile_configured(
                __opts__, __active_provider_name__ or "dimensiondata",
                vm_["profile"]) is False):
            return False
    except AttributeError:
        pass
    __utils__["cloud.fire_event"](
        "event",
        "starting create",
        "salt/cloud/{}/creating".format(vm_["name"]),
        args=__utils__["cloud.filter_event"](
            "creating", vm_, ["name", "profile", "provider", "driver"]),
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    log.info("Creating Cloud VM %s", vm_["name"])
    conn = get_conn()
    location = conn.ex_get_location_by_id(vm_["location"])
    images = conn.list_images(location=location)
    # NOTE(review): raises IndexError if the configured image id is not
    # found at this location.
    image = [x for x in images if x.id == vm_["image"]][0]
    network_domains = conn.ex_list_network_domains(location=location)
    # Reuse an existing network domain with the configured name, or
    # create one on the fly.
    try:
        network_domain = [
            y for y in network_domains if y.name == vm_["network_domain"]
        ][0]
    except IndexError:
        network_domain = conn.ex_create_network_domain(
            location=location,
            name=vm_["network_domain"],
            plan="ADVANCED",
            description="",
        )
    try:
        vlan = [
            y for y in conn.ex_list_vlans(location=location,
                                          network_domain=network_domain)
            if y.name == vm_["vlan"]
        ][0]
    except (IndexError, KeyError):
        # Use the first VLAN in the network domain
        vlan = conn.ex_list_vlans(location=location,
                                  network_domain=network_domain)[0]
    kwargs = {
        "name": vm_["name"],
        "image": image,
        "ex_description": vm_["description"],
        "ex_network_domain": network_domain,
        "ex_vlan": vlan,
        "ex_is_started": vm_["is_started"],
    }
    event_data = _to_event_data(kwargs)
    __utils__["cloud.fire_event"](
        "event",
        "requesting instance",
        "salt/cloud/{}/requesting".format(vm_["name"]),
        args=__utils__["cloud.filter_event"]("requesting", event_data,
                                             list(event_data)),
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    # Initial password (excluded from event payload)
    initial_password = NodeAuthPassword(vm_["auth"])
    kwargs["auth"] = initial_password
    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:  # pylint: disable=broad-except
        log.error(
            "Error creating %s on DIMENSIONDATA\n\n"
            "The following exception was thrown by libcloud when trying to "
            "run the initial deployment: \n%s",
            vm_["name"],
            exc,
            exc_info_on_loglevel=logging.DEBUG,
        )
        return False
    # Poll until the node reports an IP address (or times out).
    try:
        data = __utils__["cloud.wait_for_ip"](
            _query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value("wait_for_ip_timeout", vm_,
                                                  __opts__, default=25 * 60),
            interval=config.get_cloud_config_value("wait_for_ip_interval",
                                                   vm_, __opts__, default=30),
            max_failures=config.get_cloud_config_value(
                "wait_for_ip_max_failures", vm_, __opts__, default=60),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_["name"])  # pylint: disable=not-callable
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))
    log.debug("VM is now running")
    if ssh_interface(vm_) == "private_ips":
        ip_address = preferred_ip(vm_, data.private_ips)
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug("Using IP address %s", ip_address)
    if __utils__["cloud.get_salt_interface"](vm_, __opts__) == "private_ips":
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info("Salt interface set to: %s", salt_ip_address)
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug("Salt interface set to: %s", salt_ip_address)
    if not ip_address:
        raise SaltCloudSystemExit("No IP addresses could be found.")
    vm_["salt_host"] = salt_ip_address
    vm_["ssh_host"] = ip_address
    vm_["password"] = vm_["auth"]
    ret = __utils__["cloud.bootstrap"](vm_, __opts__)
    ret.update(data.__dict__)
    # Don't leak the initial password through the returned node details.
    if "password" in data.extra:
        del data.extra["password"]
    log.info("Created Cloud VM '%s'", vm_["name"])
    log.debug("'%s' VM creation details:\n%s", vm_["name"],
              pprint.pformat(data.__dict__))
    __utils__["cloud.fire_event"](
        "event",
        "created instance",
        "salt/cloud/{}/created".format(vm_["name"]),
        args=__utils__["cloud.filter_event"](
            "created", vm_, ["name", "profile", "provider", "driver"]),
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    return ret
def get_conn():
    '''
    Return a conn object for the passed VM data

    Builds a libcloud OpenStack driver from the configured provider
    settings. Authenticates with a password (optionally fetched from the
    system keyring when the configured password is ``USE_KEYRING``) or,
    when no password is configured, falls back to API-key auth.
    '''
    vm_ = get_configured_provider()

    driver = get_driver(Provider.OPENSTACK)

    authinfo = {
        'ex_force_auth_url':
        config.get_cloud_config_value('identity_url', vm_, __opts__,
                                      search_global=False),
        'ex_force_service_name':
        config.get_cloud_config_value('compute_name', vm_, __opts__,
                                      search_global=False),
        'ex_force_service_region':
        config.get_cloud_config_value('compute_region', vm_, __opts__,
                                      search_global=False),
        'ex_tenant_name':
        config.get_cloud_config_value('tenant', vm_, __opts__,
                                      search_global=False),
    }

    service_type = config.get_cloud_config_value('service_type', vm_,
                                                 __opts__,
                                                 search_global=False)
    if service_type:
        authinfo['ex_force_service_type'] = service_type

    base_url = config.get_cloud_config_value('base_url', vm_, __opts__,
                                             search_global=False)
    if base_url:
        authinfo['ex_force_base_url'] = base_url

    insecure = config.get_cloud_config_value('insecure', vm_, __opts__,
                                             search_global=False)
    if insecure:
        import libcloud.security
        libcloud.security.VERIFY_SSL_CERT = False

    user = config.get_cloud_config_value('user', vm_, __opts__,
                                         search_global=False)
    password = config.get_cloud_config_value('password', vm_, __opts__,
                                             search_global=False)

    if password is not None:
        authinfo['ex_force_auth_version'] = '2.0_password'
        log.debug('OpenStack authenticating using password')
        if password == 'USE_KEYRING':
            # retrieve password from system keyring
            credential_id = "salt.cloud.provider.{0}".format(
                __active_provider_name__)
            # Fixed: previously used the root logger (logging.debug) with
            # eager .format(); use the module logger with lazy %-args.
            log.debug("Retrieving keyring password for %s (%s)",
                      credential_id, user)
            # attempt to retrieve driver specific password first
            driver_password = salt.utils.cloud.retrieve_password_from_keyring(
                credential_id, user)
            if driver_password is None:
                provider_password = salt.utils.cloud.retrieve_password_from_keyring(
                    credential_id.split(':')[0],  # fallback to provider level
                    user)
                if provider_password is None:
                    raise SaltCloudSystemExit(
                        "Unable to retrieve password from keyring for "
                        "provider {0}".format(__active_provider_name__))
                else:
                    actual_password = provider_password
            else:
                actual_password = driver_password
        else:
            actual_password = password
        return driver(user, actual_password, **authinfo)

    # No password configured: authenticate with an API key instead.
    authinfo['ex_force_auth_version'] = '2.0_apikey'
    log.debug('OpenStack authenticating using apikey')
    return driver(
        user,
        config.get_cloud_config_value('apikey', vm_, __opts__,
                                      search_global=False), **authinfo)
def create(vm_):
    '''
    Create a single VM from a data dict

    Either adopts an already-created instance (``instance_id`` present in
    ``vm_``) or requests a fresh one, waits for a usable IP address, then
    bootstraps Salt onto the machine.

    Returns the bootstrap result dict merged with the node's attributes,
    or ``False`` if the profile check fails.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__, __active_provider_name__ or 'openstack',
                vm_['profile'], vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Since using "provider: <provider-engine>" is deprecated, alias provider
    # to use driver: "driver: <provider-engine>"
    if 'provider' in vm_:
        vm_['driver'] = vm_.pop('provider')

    # NOTE(review): 'deploy' is read from config here but never used in
    # this function — confirm whether it is needed downstream.
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value('ssh_key_file', vm_,
                                                 __opts__,
                                                 search_global=False,
                                                 default=None)
    if key_filename is not None:
        key_filename = os.path.expanduser(key_filename)
        if not os.path.isfile(key_filename):
            raise SaltCloudConfigError(
                'The defined ssh_key_file \'{0}\' does not exist'.format(
                    key_filename))

    vm_['key_filename'] = key_filename

    salt.utils.cloud.fire_event('event', 'starting create',
                                'salt/cloud/{0}/creating'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['driver'],
                                },
                                transport=__opts__['transport'])

    conn = get_conn()

    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for \'{0[name]}\''.format(vm_))
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                salt.config.get_cloud_config_value('keysize', vm_, __opts__))
        data = conn.ex_get_node_details(vm_['instance_id'])
        if vm_['key_filename'] is None and 'change_password' in __opts__ and __opts__[
                'change_password'] is True:
            vm_['password'] = sup.secure_password()
            conn.ex_set_password(data, vm_['password'])
        networks(vm_)
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        data, vm_ = request_instance(vm_)

        # Pull the instance ID, valid for both spot and normal instances
        vm_['instance_id'] = data.id

    def __query_node_data(vm_, data, floating):
        # Poll helper for wait_for_ip: returns `data` once a usable IP is
        # attached, False to abort, or None to keep polling.
        try:
            node = show_instance(vm_['name'], 'action')
            log.debug('Loaded node data for {0}:\n{1}'.format(
                vm_['name'], pprint.pformat(node)))
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(err),
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG)
            # Trigger a failure in the wait for IP function
            return False

        running = node['state'] == NodeState.RUNNING
        if not running:
            # Still not running, trigger another iteration
            return

        if rackconnect(vm_) is True:
            check_libcloud_version((0, 14, 0), why='rackconnect: True')
            extra = node.get('extra')
            rc_status = extra.get('metadata', {}).get(
                'rackconnect_automation_status', '')
            # NOTE(review): access_ip is only bound on this branch; the
            # later `data.public_ips = access_ip` use is guarded by the
            # same rackconnect(vm_) check, so it cannot be unbound there.
            access_ip = extra.get('access_ip', '')
            if rc_status != 'DEPLOYED':
                log.debug('Waiting for Rackconnect automation to complete')
                return

        if managedcloud(vm_) is True:
            extra = node.get('extra')
            mc_status = extra.get('metadata', {}).get(
                'rax_service_level_automation', '')
            if mc_status != 'Complete':
                log.debug('Waiting for managed cloud automation to complete')
                return

        public = node['public_ips']
        if floating:
            try:
                name = data.name
                ip = floating[0].ip_address
                conn.ex_attach_floating_ip_to_node(data, ip)
                log.info('Attaching floating IP \'{0}\' to node \'{1}\''.format(
                    ip, name))
                data.public_ips.append(ip)
                public = data.public_ips
            except Exception:
                # Note(pabelanger): Because we loop, we only want to attach the
                # floating IP address one. So, expect failures if the IP is
                # already attached.
                pass

        result = []
        private = node['private_ips']
        if private and not public:
            log.warning('Private IPs returned, but not public... Checking for '
                        'misidentified IPs')
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.utils.cloud.is_public_ip(private_ip):
                    log.warning('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                    log.warning(
                        'Public IP address was not ready when we last checked.'
                        ' Appending public IP address now.')
                    public = data.public_ips
                else:
                    log.warning('{0} is a private IP'.format(private_ip))
                    ignore_ip = ignore_cidr(vm_, private_ip)
                    if private_ip not in data.private_ips and not ignore_ip:
                        result.append(private_ip)

        if rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
            # NOTE(review): this assigns a single address string (not a
            # list) to public_ips — confirm downstream consumers accept it.
            data.public_ips = access_ip
            return data

        # populate return data with private_ips
        # when ssh_interface is set to private_ips and public_ips exist
        if not result and ssh_interface(vm_) == 'private_ips':
            for private_ip in private:
                ignore_ip = ignore_cidr(vm_, private_ip)
                if private_ip not in data.private_ips and not ignore_ip:
                    result.append(private_ip)

        if result:
            log.debug('result = {0}'.format(result))
            data.private_ips = result
            if ssh_interface(vm_) == 'private_ips':
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != 'private_ips':
                return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data, vm_['floating']),
            timeout=config.get_cloud_config_value('wait_for_ip_timeout', vm_,
                                                  __opts__, default=10 * 60),
            interval=config.get_cloud_config_value('wait_for_ip_interval',
                                                   vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    log.debug('VM is now running')

    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
        ip_address = data.public_ips
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: {0}'.format(salt_ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    vm_['salt_host'] = salt_ip_address
    vm_['ssh_host'] = ip_address

    ret = salt.utils.cloud.bootstrap(vm_, __opts__)
    ret.update(data.__dict__)

    # Don't leak the admin password through the returned node details.
    if hasattr(data, 'extra') and 'password' in data.extra:
        del data.extra['password']

    log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
    log.debug('\'{0[name]}\' VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data.__dict__)))

    salt.utils.cloud.fire_event('event', 'created instance',
                                'salt/cloud/{0}/created'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['driver'],
                                },
                                transport=__opts__['transport'])
    return ret
def _salt(fun, *args, **kw):
    """Execute a salt function on a specific minion

    Special kwargs:

            salt_target
                target to exec things on
            salt_timeout
                timeout for jobs
            salt_job_poll
                poll interval to wait for job finish result
    """
    # Pop the driver-control kwargs before `kw` is forwarded to the
    # remote function.
    try:
        poll = kw.pop("salt_job_poll")
    except KeyError:
        poll = 0.1
    try:
        target = kw.pop("salt_target")
    except KeyError:
        target = None
    try:
        timeout = int(kw.pop("salt_timeout"))
    except (KeyError, ValueError):
        # try to has some low timeouts for very basic commands
        timeout = __FUN_TIMEOUT.get(
            fun, 900  # wait up to 15 minutes for the default timeout
        )
    try:
        kwargs = kw.pop("kwargs")
    except KeyError:
        kwargs = {}
    if not target:
        infos = get_configured_provider()
        if not infos:
            return
        target = infos["target"]
    # `laps` doubles as a coarse time bucket so cached results for
    # functions listed in __CACHED_FUNS expire after their TTL.
    laps = time.time()
    cache = False
    if fun in __CACHED_FUNS:
        cache = True
        laps = laps // __CACHED_FUNS[fun]
    # JSON-serialize the call arguments to build a hashable cache key;
    # unserializable args fall back to "" (weakening the key).
    try:
        sargs = salt.utils.json.dumps(args)
    except TypeError:
        sargs = ""
    try:
        skw = salt.utils.json.dumps(kw)
    except TypeError:
        skw = ""
    try:
        skwargs = salt.utils.json.dumps(kwargs)
    except TypeError:
        skwargs = ""
    cache_key = (laps, target, fun, sargs, skw, skwargs)
    if not cache or (cache and (cache_key not in __CACHED_CALLS)):
        with _client() as conn:
            runner = _runner()
            rkwargs = kwargs.copy()
            rkwargs["timeout"] = timeout
            rkwargs.setdefault("tgt_type", "list")
            kwargs.setdefault("tgt_type", "list")
            ping_retries = 0
            # the target(s) have environ one minute to respond
            # we call 60 ping request, this prevent us
            # from blindly send commands to unmatched minions
            ping_max_retries = 60
            ping = True
            # do not check ping... if we are pinguing
            if fun == "test.ping":
                ping_retries = ping_max_retries + 1
            # be sure that the executors are alive
            while ping_retries <= ping_max_retries:
                try:
                    if ping_retries > 0:
                        time.sleep(1)
                    pings = conn.cmd(tgt=target, timeout=10, fun="test.ping")
                    values = list(pings.values())
                    if not values:
                        ping = False
                    for v in values:
                        if v is not True:
                            ping = False
                    if not ping:
                        raise ValueError("Unreachable")
                    break
                except Exception:  # pylint: disable=broad-except
                    ping = False
                    ping_retries += 1
                    log.error("%s unreachable, retrying", target)
            if not ping:
                raise SaltCloudSystemExit("Target {} unreachable".format(target))
            # Fire the real call asynchronously, then poll
            # saltutil.find_job until the job leaves the running state.
            jid = conn.cmd_async(tgt=target, fun=fun, arg=args, kwarg=kw,
                                 **rkwargs)
            cret = conn.cmd(
                tgt=target, fun="saltutil.find_job", arg=[jid], timeout=10,
                **kwargs
            )
            running = bool(cret.get(target, False))
            endto = time.time() + timeout
            while running:
                rkwargs = {
                    "tgt": target,
                    "fun": "saltutil.find_job",
                    "arg": [jid],
                    "timeout": 10,
                }
                cret = conn.cmd(**rkwargs)
                running = bool(cret.get(target, False))
                if not running:
                    break
                if running and (time.time() > endto):
                    raise Exception(
                        "Timeout {}s for {} is elapsed".format(
                            timeout, pprint.pformat(rkwargs)
                        )
                    )
                time.sleep(poll)
            # timeout for the master to return data about a specific job
            # NOTE(review): if this loop exhausts without a break for a
            # function other than test.ping, `ret` may be unbound below —
            # confirm intended behavior.
            wait_for_res = float({"test.ping": "5"}.get(fun, "120"))
            while wait_for_res:
                wait_for_res -= 0.5
                cret = runner.cmd("jobs.lookup_jid", [jid, {"__kwarg__": True}])
                if target in cret:
                    ret = cret[target]
                    break
                # recent changes
                elif "data" in cret and "outputter" in cret:
                    ret = cret["data"]
                    break
                # special case, some answers may be crafted
                # to handle the unresponsivness of a specific command
                # which is also meaningful, e.g. a minion not yet provisioned
                if fun in ["test.ping"] and not wait_for_res:
                    ret = {"test.ping": False}.get(fun, False)
                time.sleep(0.5)
            # `ret` may not be a string; TypeError from `in` is tolerated.
            try:
                if "is not available." in ret:
                    raise SaltCloudSystemExit(
                        "module/function {} is not available".format(fun)
                    )
            except SaltCloudSystemExit:  # pylint: disable=try-except-raise
                raise
            except TypeError:
                pass
            if cache:
                __CACHED_CALLS[cache_key] = ret
    elif cache and cache_key in __CACHED_CALLS:
        ret = __CACHED_CALLS[cache_key]
    return ret
def request_instance(vm_=None, call=None):
    '''
    Put together all of the information necessary to request an instance
    through Novaclient and then fire off the request the instance.

    Returns data about the instance
    '''
    if call == 'function':
        # Technically this function may be called other ways too, but it
        # definitely cannot be called with --function.
        raise SaltCloudSystemExit(
            'The request_instance action must be called with -a or --action.')

    log.info('Creating Cloud VM %s', vm_['name'])
    salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-')
    conn = get_conn()
    kwargs = vm_.copy()

    try:
        kwargs['image_id'] = get_image(conn, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit('Error creating {0} on OPENSTACK\n\n'
                                  'Could not find image {1}: {2}\n'.format(
                                      vm_['name'], vm_['image'], exc))

    try:
        kwargs['flavor_id'] = get_size(conn, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit('Error creating {0} on OPENSTACK\n\n'
                                  'Could not find size {1}: {2}\n'.format(
                                      vm_['name'], vm_['size'], exc))

    kwargs['key_name'] = config.get_cloud_config_value('ssh_key_name', vm_,
                                                       __opts__,
                                                       search_global=False)

    security_groups = config.get_cloud_config_value('security_groups', vm_,
                                                    __opts__,
                                                    search_global=False)
    if security_groups is not None:
        vm_groups = security_groups.split(',')
        avail_groups = conn.secgroup_list()
        group_list = []

        for vmg in vm_groups:
            # Direct dict membership test instead of rebuilding the key
            # list on every iteration.
            if vmg in avail_groups:
                group_list.append(vmg)
            else:
                raise SaltCloudNotFound(
                    'No such security group: \'{0}\''.format(vmg))

        kwargs['security_groups'] = group_list

    avz = config.get_cloud_config_value('availability_zone', vm_, __opts__,
                                        default=None, search_global=False)
    if avz is not None:
        kwargs['availability_zone'] = avz

    kwargs['nics'] = config.get_cloud_config_value('networks', vm_, __opts__,
                                                   search_global=False,
                                                   default=None)

    files = config.get_cloud_config_value('files', vm_, __opts__,
                                          search_global=False)
    if files:
        kwargs['files'] = {}
        for src_path in files:
            # Inline file content when the configured path exists locally,
            # otherwise pass the value through verbatim.
            if os.path.exists(files[src_path]):
                with salt.utils.fopen(files[src_path], 'r') as fp_:
                    kwargs['files'][src_path] = fp_.read()
            else:
                kwargs['files'][src_path] = files[src_path]

    userdata_file = config.get_cloud_config_value('userdata_file', vm_,
                                                  __opts__,
                                                  search_global=False)
    if userdata_file is not None:
        with salt.utils.fopen(userdata_file, 'r') as fp_:
            kwargs['userdata'] = fp_.read()

    kwargs['config_drive'] = config.get_cloud_config_value(
        'config_drive', vm_, __opts__, search_global=False)

    salt.utils.cloud.fire_event('event', 'requesting instance',
                                'salt/cloud/{0}/requesting'.format(
                                    vm_['name']), {
                                        'kwargs': {
                                            'name': kwargs['name'],
                                            'image': kwargs['image_id'],
                                            'size': kwargs['flavor_id']
                                        }
                                    },
                                transport=__opts__['transport'])

    try:
        data = conn.boot(**kwargs)
    except Exception as exc:
        raise SaltCloudSystemExit(
            'Error creating {0} on Nova\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(vm_['name'], exc))

    vm_['password'] = data.extra.get('password', '')
    return data, vm_
def list_nodes_full(call=None):
    '''
    Return a list of the VMs that are on the provider

    Queries the instance status list for the configured region, then
    fetches the full attribute set for each instance. Results are also
    written to the salt-cloud node cache.
    '''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The list_nodes_full function must be called with -f '
            'or --function.')

    ret = {}
    location = get_location()
    params = {
        'Action': 'DescribeInstanceStatus',
        'RegionId': location,
    }
    result = query(params=params)

    log.debug('Total %s instance found in Region %s',
              result['TotalCount'], location)
    if 'Code' in result or result['TotalCount'] == 0:
        return ret

    for node in result['InstanceStatuses']['InstanceStatus']:
        instance_id = node.get('InstanceId', '')
        params = {
            'Action': 'DescribeInstanceAttribute',
            'InstanceId': instance_id
        }
        items = query(params=params)
        if 'Code' in items:
            # log.warn is deprecated in favor of log.warning
            log.warning('Query instance:%s attribute failed', instance_id)
            continue

        ret[instance_id] = {
            'id': items['InstanceId'],
            'name': items['InstanceName'],
            'image': items['ImageId'],
            'size': 'TODO',
            'state': items['Status']
        }
        # Flatten every attribute into the node dict as strings; the IP
        # address fields additionally populate public_ips/private_ips.
        for item in items:
            value = items[item]
            if value is not None:
                value = str(value)
            if item == "PublicIpAddress":
                ret[instance_id]['public_ips'] = items[item]['IpAddress']
            if item == "InnerIpAddress":
                ret[instance_id]['private_ips'] = items[item]['IpAddress']
            ret[instance_id][item] = value

    provider = __active_provider_name__ or 'aliyun'
    if ':' in provider:
        comps = provider.split(':')
        provider = comps[0]

    __opts__['update_cachedir'] = True
    salt.utils.cloud.cache_node_list(ret, provider, __opts__)

    return ret
def create(vm_):
    '''
    Create a single VM from a data dict

    Requests an Aliyun ECS instance, waits for it to acquire a public IP,
    then bootstraps Salt onto it. Returns the bootstrap result dict merged
    with the instance details, or ``False`` on failure.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__, __active_provider_name__ or 'aliyun',
                vm_['profile'], vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Since using "provider: <provider-engine>" is deprecated, alias provider
    # to use driver: "driver: <provider-engine>"
    if 'provider' in vm_:
        vm_['driver'] = vm_.pop('provider')

    salt.utils.cloud.fire_event('event', 'starting create',
                                'salt/cloud/{0}/creating'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['driver'],
                                },
                                transport=__opts__['transport'])

    log.info('Creating Cloud VM %s', vm_['name'])

    kwargs = {
        'name': vm_['name'],
        'size_id': get_size(vm_),
        'image_id': get_image(vm_),
        'region_id': __get_location(vm_),
        'securitygroup_id': get_securitygroup(vm_),
    }

    salt.utils.cloud.fire_event('event', 'requesting instance',
                                'salt/cloud/{0}/requesting'.format(
                                    vm_['name']), {'kwargs': kwargs},
                                transport=__opts__['transport'])

    try:
        ret = create_node(kwargs)
    except Exception as exc:
        log.error(
            'Error creating %s on Aliyun ECS\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: %s', vm_['name'], exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG)
        return False

    def __query_node_data(vm_name):
        # Poll helper: False aborts wait_for_ip, None keeps polling, and
        # the instance dict is returned once a public IP is assigned.
        data = show_instance(vm_name, call='action')
        if not data:
            # Trigger an error in the wait_for_ip function
            return False
        if data.get('PublicIpAddress', None) is not None:
            return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_['name'], ),
            timeout=config.get_cloud_config_value('wait_for_ip_timeout', vm_,
                                                  __opts__, default=10 * 60),
            interval=config.get_cloud_config_value('wait_for_ip_interval',
                                                   vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    public_ip = data['PublicIpAddress'][0]
    log.debug('VM %s is now running', public_ip)
    vm_['ssh_host'] = public_ip

    # The instance is booted and accessible, let's Salt it!
    ret = salt.utils.cloud.bootstrap(vm_, __opts__)
    # ``data`` is the plain dict returned by show_instance() (see the
    # dict indexing above); the previous ``ret.update(data.__dict__)``
    # raised AttributeError since plain dicts have no __dict__.
    ret.update(data)

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event('event', 'created instance',
                                'salt/cloud/{0}/created'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['driver'],
                                },
                                transport=__opts__['transport'])

    return ret
def create(vm_):
    """
    Provision a single machine

    Clones ``base_domain`` on the configured libvirt host (either a
    "quick" backing-store clone or a "full" copy), rewrites the clone's
    XML (name, description, MACs, optional IP-learning filter, qemu
    agent socket path, disk sources), starts it, waits for an IP and
    bootstraps Salt. On any failure the created volumes/domain are
    cleaned up and the original exception re-raised.
    """
    clone_strategy = vm_.get("clone_strategy") or "full"

    if clone_strategy not in ("quick", "full"):
        raise SaltCloudSystemExit(
            "'clone_strategy' must be one of quick or full. Got '{}'".format(
                clone_strategy))

    ip_source = vm_.get("ip_source") or "ip-learning"

    if ip_source not in ("ip-learning", "qemu-agent"):
        raise SaltCloudSystemExit(
            "'ip_source' must be one of qemu-agent or ip-learning. Got '{}'".
            format(ip_source))

    validate_xml = (vm_.get("validate_xml")
                    if vm_.get("validate_xml") is not None else True)

    log.info(
        "Cloning '%s' with strategy '%s' validate_xml='%s'",
        vm_["name"],
        clone_strategy,
        validate_xml,
    )

    try:
        # Check for required profile parameters before sending any API calls.
        if (vm_["profile"] and config.is_profile_configured(
                __opts__, _get_active_provider_name() or "libvirt",
                vm_["profile"]) is False):
            return False
    except AttributeError:
        pass

    # TODO: check name qemu/libvirt will choke on some characters (like '/')?
    name = vm_["name"]

    __utils__["cloud.fire_event"](
        "event",
        "starting create",
        "salt/cloud/{}/creating".format(name),
        args=__utils__["cloud.filter_event"](
            "creating", vm_, ["name", "profile", "provider", "driver"]),
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )

    key_filename = config.get_cloud_config_value("private_key", vm_, __opts__,
                                                 search_global=False,
                                                 default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            "The defined key_filename '{}' does not exist".format(
                key_filename))
    vm_["key_filename"] = key_filename
    # wait_for_instance requires private_key
    vm_["private_key"] = key_filename

    cleanup = []
    try:
        # clone the vm
        base = vm_["base_domain"]
        conn = __get_conn(vm_["url"])

        try:
            # for idempotency the salt-bootstrap needs -F argument
            # script_args: -F
            clone_domain = conn.lookupByName(name)
        except libvirtError:
            # Clone doesn't exist yet: build it from the base domain's XML.
            domain = conn.lookupByName(base)
            # TODO: ensure base is shut down before cloning
            xml = domain.XMLDesc(0)

            kwargs = {
                "name": name,
                "base_domain": base,
            }

            __utils__["cloud.fire_event"](
                "event",
                "requesting instance",
                "salt/cloud/{}/requesting".format(name),
                args={
                    "kwargs":
                    __utils__["cloud.filter_event"]("requesting", kwargs,
                                                    list(kwargs)),
                },
                sock_dir=__opts__["sock_dir"],
                transport=__opts__["transport"],
            )

            log.debug("Source machine XML '%s'", xml)

            domain_xml = ElementTree.fromstring(xml)
            domain_xml.find("./name").text = name
            if domain_xml.find("./description") is None:
                description_elem = ElementTree.Element("description")
                domain_xml.insert(0, description_elem)
            description = domain_xml.find("./description")
            description.text = "Cloned from {}".format(base)
            # Drop the UUID so libvirt assigns a fresh one to the clone.
            domain_xml.remove(domain_xml.find("./uuid"))

            for iface_xml in domain_xml.findall("./devices/interface"):
                iface_xml.remove(iface_xml.find("./mac"))
                # enable IP learning, this might be a default behaviour...
                # Don't always enable since it can cause problems through libvirt-4.5
                if (ip_source == "ip-learning" and iface_xml.find(
                        "./filterref/parameter[@name='CTRL_IP_LEARNING']")
                        is None):
                    iface_xml.append(ElementTree.fromstring(IP_LEARNING_XML))

            # If a qemu agent is defined we need to fix the path to its socket
            # <channel type='unix'>
            #   <source mode='bind' path='/var/lib/libvirt/qemu/channel/target/domain-<dom-name>/org.qemu.guest_agent.0'/>
            #   <target type='virtio' name='org.qemu.guest_agent.0'/>
            #   <address type='virtio-serial' controller='0' bus='0' port='2'/>
            # </channel>
            for agent_xml in domain_xml.findall(
                    """./devices/channel[@type='unix']"""):
                # is org.qemu.guest_agent.0 an option?
                if (agent_xml.find(
                        """./target[@type='virtio'][@name='org.qemu.guest_agent.0']"""
                ) is not None):
                    source_element = agent_xml.find(
                        """./source[@mode='bind']""")
                    # see if there is a path element that needs rewriting.
                    # Fixed: an Element with no children is falsy, so the
                    # old truthiness test could silently skip the rewrite;
                    # compare against None explicitly.
                    if source_element is not None and "path" in source_element.attrib:
                        path = source_element.attrib["path"]
                        new_path = path.replace("/domain-{}/".format(base),
                                                "/domain-{}/".format(name))
                        log.debug("Rewriting agent socket path to %s",
                                  new_path)
                        source_element.attrib["path"] = new_path

            for disk in domain_xml.findall(
                    """./devices/disk[@device='disk'][@type='file']"""):
                # check if we can clone
                driver = disk.find("./driver[@name='qemu']")
                if driver is None:
                    # Err on the safe side
                    raise SaltCloudExecutionFailure(
                        "Non qemu driver disk encountered bailing out.")
                disk_type = driver.attrib.get("type")
                log.info("disk attributes %s", disk.attrib)
                if disk_type == "qcow2":
                    source = disk.find("./source").attrib["file"]
                    pool, volume = find_pool_and_volume(conn, source)
                    if clone_strategy == "quick":
                        new_volume = pool.createXML(
                            create_volume_with_backing_store_xml(volume), 0)
                    else:
                        new_volume = pool.createXMLFrom(
                            create_volume_xml(volume), volume, 0)
                    cleanup.append({"what": "volume", "item": new_volume})
                    disk.find("./source").attrib["file"] = new_volume.path()
                elif disk_type == "raw":
                    source = disk.find("./source").attrib["file"]
                    pool, volume = find_pool_and_volume(conn, source)
                    # TODO: more control on the cloned disk type
                    new_volume = pool.createXMLFrom(create_volume_xml(volume),
                                                    volume, 0)
                    cleanup.append({"what": "volume", "item": new_volume})
                    disk.find("./source").attrib["file"] = new_volume.path()
                else:
                    raise SaltCloudExecutionFailure(
                        "Disk type '{}' not supported".format(disk_type))

            clone_xml = salt.utils.stringutils.to_str(
                ElementTree.tostring(domain_xml))
            log.debug("Clone XML '%s'", clone_xml)

            validate_flags = (libvirt.VIR_DOMAIN_DEFINE_VALIDATE
                              if validate_xml else 0)
            clone_domain = conn.defineXMLFlags(clone_xml, validate_flags)

            cleanup.append({"what": "domain", "item": clone_domain})
            clone_domain.createWithFlags(libvirt.VIR_DOMAIN_START_FORCE_BOOT)

        log.debug("VM '%s'", vm_)

        if ip_source == "qemu-agent":
            ip_source = libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT
        elif ip_source == "ip-learning":
            ip_source = libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE

        address = salt.utils.cloud.wait_for_ip(
            get_domain_ip,
            update_args=(clone_domain, 0, ip_source),
            timeout=config.get_cloud_config_value("wait_for_ip_timeout", vm_,
                                                  __opts__, default=10 * 60),
            interval=config.get_cloud_config_value("wait_for_ip_interval",
                                                   vm_, __opts__, default=10),
            interval_multiplier=config.get_cloud_config_value(
                "wait_for_ip_interval_multiplier", vm_, __opts__, default=1),
        )

        log.info("Address = %s", address)

        vm_["ssh_host"] = address

        # the bootstrap script needs to be installed first in /etc/salt/cloud.deploy.d/
        # salt-cloud -u is your friend
        ret = __utils__["cloud.bootstrap"](vm_, __opts__)

        __utils__["cloud.fire_event"](
            "event",
            "created instance",
            "salt/cloud/{}/created".format(name),
            args=__utils__["cloud.filter_event"](
                "created", vm_, ["name", "profile", "provider", "driver"]),
            sock_dir=__opts__["sock_dir"],
            transport=__opts__["transport"],
        )

        return ret
    except Exception:  # pylint: disable=broad-except
        do_cleanup(cleanup)
        # throw the root cause after cleanup
        raise
def destroy(name, call=None):
    """
    This function irreversibly destroys a virtual machine on the cloud
    provider.  Before doing so, it fires ``salt/cloud/<vm name>/destroying``
    on the Salt event bus; once the machine has been destroyed it fires
    ``salt/cloud/<vm name>/destroyed``.

    Dependencies:
        list_nodes

    @param name: name of the machine to delete
    @type name: str
    @param call: kind of call (must not be 'function')
    @type call: str
    @return: True if all went well, otherwise an error message
    @rtype: bool|str
    """
    log.info("Attempting to delete instance %s", name)

    if call == "function":
        raise SaltCloudSystemExit(
            "The destroy action must be called with -d, --destroy, "
            "-a or --action.")

    found = []

    # Search every configured libvirt provider for a domain with this name.
    providers = __opts__.get("providers", {})
    providers_to_check = [
        _f for _f in [cfg.get("libvirt") for cfg in providers.values()] if _f
    ]
    for provider in providers_to_check:
        conn = __get_conn(provider["url"])
        log.info("looking at %s", provider["url"])
        try:
            domain = conn.lookupByName(name)
            found.append({"domain": domain, "conn": conn})
        except libvirtError:
            # Domain not defined on this hypervisor; keep looking.
            pass

    if not found:
        return "{} doesn't exist and can't be deleted".format(name)

    if len(found) > 1:
        # Refuse to guess which hypervisor's copy the caller meant.
        return "{} doesn't identify a unique machine leaving things".format(
            name)

    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )

    destroy_domain(found[0]["conn"], found[0]["domain"])

    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )

    # BUG FIX: the docstring promises True on success, but the function
    # previously fell off the end and returned None.
    return True
def create(vm_):
    '''
    Create a single VM from a data dict.

    Fires the standard salt-cloud lifecycle events (creating, requesting,
    created), requests the server from ProfitBricks, waits for it to become
    RUNNING and obtain an IP address, then bootstraps Salt onto it.

    vm_ -- the cloud profile/VM data dict; must contain at least 'name'.

    Returns the bootstrap result merged with the node data, or False on
    failure.  Raises SaltCloudSystemExit if no usable IP address is found.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if (vm_['profile'] and
                config.is_profile_configured(__opts__,
                                             (__active_provider_name__
                                              or 'profitbricks'),
                                             vm_['profile']) is False):
            return False
    except AttributeError:
        pass

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    data = None
    datacenter_id = get_datacenter_id()
    conn = get_conn()

    # Assemble list of network interfaces from the cloud profile config.
    nics = _get_nics(vm_)

    # Assemble list of volumes from the cloud profile config.
    volumes = [_get_system_volume(vm_)]
    if 'volumes' in vm_:
        volumes.extend(_get_data_volumes(vm_))

    # Assemble the composite server object.
    server = _get_server(vm_, volumes, nics)

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'requesting', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    try:
        data = conn.create_server(datacenter_id=datacenter_id, server=server)
        log.info('Create server request ID: %s', data['requestId'],
                 exc_info_on_loglevel=logging.DEBUG)

        _wait_for_completion(conn, data, get_wait_timeout(vm_),
                             'create_server')
    except Exception as exc:  # pylint: disable=W0703
        log.error(
            'Error creating %s on ProfitBricks\n\n'
            'The following exception was thrown by the profitbricks library '
            'when trying to run the initial deployment: \n%s',
            vm_['name'], exc,
            exc_info_on_loglevel=logging.DEBUG
        )
        return False

    vm_['server_id'] = data['id']

    def __query_node_data(vm_, data):
        '''
        Query node data until node becomes available.
        '''
        running = False
        try:
            data = show_instance(vm_['name'], 'action')
            if not data:
                return False
            log.debug(
                'Loaded node data for %s:\nname: %s\nstate: %s',
                vm_['name'], pprint.pformat(data['name']), data['vmState']
            )
        except Exception as err:  # pylint: disable=W0703
            log.error(
                'Failed to get nodes list: %s', err,
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG
            )
            # Trigger a failure in the wait for IP function
            return False

        running = data['vmState'] == 'RUNNING'
        if not running:
            # Still not running, trigger another iteration
            return

        # Record the address matching the configured ssh interface.
        if ssh_interface(vm_) == 'private_lan' and data['private_ips']:
            vm_['ssh_host'] = data['private_ips'][0]

        if ssh_interface(vm_) != 'private_lan' and data['public_ips']:
            vm_['ssh_host'] = data['public_ips'][0]

        return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            # BUG FIX: Python 3 exceptions have no '.message' attribute
            # (PEP 352); str(exc) is the portable way to get the text, and
            # the old code masked the real error with an AttributeError.
            raise SaltCloudSystemExit(str(exc))

    log.debug('VM is now running')
    log.info('Created Cloud VM %s', vm_)
    log.debug('%s VM creation details:\n%s', vm_, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    if 'ssh_host' in vm_:
        vm_['key_filename'] = get_key_filename(vm_)
        ret = __utils__['cloud.bootstrap'](vm_, __opts__)
        ret.update(data)
        return ret
    else:
        raise SaltCloudSystemExit('A valid IP address was not found.')
def create(server_):
    '''
    Create a single BareMetal server from a data dict.

    Fires the salt-cloud creating/requesting/created events, asks Scaleway
    for the server, waits until it has a public IP, then bootstraps Salt on
    it.  Returns the bootstrap result merged with the node data, or False on
    failure.
    '''
    # Bail out early if a required profile parameter is missing.
    try:
        profile = server_['profile']
        if profile and config.is_profile_configured(
                __opts__, __active_provider_name__ or 'scaleway',
                profile, vm_=server_) is False:
            return False
    except AttributeError:
        pass

    name = server_['name']

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{}/creating'.format(name),
        args=__utils__['cloud.filter_event'](
            'creating', server_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    log.info('Creating a BareMetal server %s', name)

    # Pull the provider/profile configuration needed for the request.
    access_key = config.get_cloud_config_value(
        'access_key', get_configured_provider(), __opts__,
        search_global=False)
    commercial_type = config.get_cloud_config_value(
        'commercial_type', server_, __opts__, default='C1')
    key_filename = config.get_cloud_config_value(
        'ssh_key_file', server_, __opts__, search_global=False, default=None)

    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename \'{0}\' does not exist'.format(
                key_filename))

    ssh_password = config.get_cloud_config_value('ssh_password', server_,
                                                 __opts__)

    node_kwargs = {
        'name': name,
        'organization': access_key,
        'image': get_image(server_),
        'commercial_type': commercial_type,
    }

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{}/requesting'.format(name),
        args={
            'kwargs': __utils__['cloud.filter_event'](
                'requesting', node_kwargs, list(node_kwargs)),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    try:
        ret = create_node(node_kwargs)
    except Exception as exc:  # pylint: disable=W0703
        log.error(
            'Error creating %s on Scaleway\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: %s',
            name, exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG)
        return False

    def __query_node_data(server_name):
        '''
        Called to check if the server has a public IP address.
        '''
        node = show_instance(server_name, 'action')
        return node if node and node.get('public_ip') else False

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(name, ),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', server_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', server_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(name)
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(six.text_type(exc))

    # The server is reachable: record connection details and bootstrap Salt.
    server_['ssh_host'] = data['public_ip']['address']
    server_['ssh_password'] = ssh_password
    server_['key_filename'] = key_filename
    ret = __utils__['cloud.bootstrap'](server_, __opts__)

    ret.update(data)

    log.info('Created BareMetal server \'%s\'', name)
    log.debug('\'%s\' BareMetal server creation details:\n%s',
              name, pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{}/created'.format(name),
        args=__utils__['cloud.filter_event'](
            'created', server_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    return ret
def create(vm_):
    '''
    Create a single instance from a data dict.

    CLI Examples:

    .. code-block:: bash

        salt-cloud -p qingcloud-ubuntu-c1m1 hostname1
        salt-cloud -m /path/to/mymap.sls -P

    Returns the node data dict on success, False if the profile is
    misconfigured.  Raises SaltCloudSystemExit if no IP is obtained.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__, __active_provider_name__ or 'qingcloud',
                vm_['profile'], vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    # CONSISTENCY: lazy %-style logger args, matching the other driver
    # functions in this file (avoids formatting when the level is disabled).
    log.info('Creating Cloud VM %s', vm_['name'])

    # params for the RunInstances API action
    params = {
        'action': 'RunInstances',
        'instance_name': vm_['name'],
        'zone': _get_location(vm_),
        'instance_type': _get_size(vm_),
        'image_id': _get_image(vm_),
        'vxnets.1': vm_['vxnets'],
        'login_mode': vm_['login_mode'],
        'login_keypair': vm_['login_keypair'],
    }

    __utils__['cloud.fire_event'](
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        args={
            'kwargs': __utils__['cloud.filter_event']('requesting', params,
                                                      list(params)),
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    result = query(params)
    new_instance_id = result['instances'][0]

    try:
        data = salt.utils.cloud.wait_for_ip(
            _query_node_data,
            update_args=(new_instance_id, ),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    private_ip = data['private_ips'][0]

    log.debug('VM %s is now running', private_ip)

    vm_['ssh_host'] = private_ip

    # The instance is booted and accessible, let's Salt it!
    __utils__['cloud.bootstrap'](vm_, __opts__)

    log.info("Created Cloud VM '%s'", vm_['name'])
    log.debug("'%s' VM creation details:\n%s",
              vm_['name'], pprint.pformat(data))

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    return data
def destroy(name, call=None):
    '''
    Destroy a node. Will check termination protection and warn if enabled.

    CLI Example:

    .. code-block:: bash

        salt-cloud --destroy mymachine
    '''
    # Only valid as an action (-d/--destroy/-a), never as --function.
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.')

    salt.utils.cloud.fire_event('event', 'destroying instance',
                                'salt/cloud/{0}/destroying'.format(name),
                                {'name': name},
                                transport=__opts__['transport'])

    # Look up the droplet id from its name, then issue the DELETE call.
    data = show_instance(name, call='action')
    node = query(method='droplets',
                 droplet_id=data['id'],
                 http_method='delete')

    ## This is all terribly optomistic:
    # vm_ = get_vm_config(name=name)
    # delete_dns_record = config.get_cloud_config_value(
    #     'delete_dns_record', vm_, __opts__, search_global=False, default=None,
    # )
    # TODO: when _vm config data can be made available, we should honor the
    # configuration settings, but until then, we should assume stale DNS
    # records are bad, and default behavior should be to delete them if we
    # can. When this is resolved, also resolve the comments a couple of
    # lines below.
    delete_dns_record = True

    # NOTE(review): this check can never fire while delete_dns_record is the
    # literal True above; it is kept for when the TODO is resolved and the
    # value comes from user configuration again.
    if not isinstance(delete_dns_record, bool):
        raise SaltCloudConfigError(
            '\'delete_dns_record\' should be a boolean value.')
    # When the "to do" a few lines up is resolved, remove these lines and
    # use the if/else logic below.
    log.debug('Deleting DNS records for {0}.'.format(name))
    destroy_dns_records(name)

    # Until the "to do" above is taken care of, we don't need this logic.
    # if delete_dns_record:
    #     log.debug('Deleting DNS records for {0}.'.format(name))
    #     destroy_dns_records(name)
    # else:
    #     log.debug('delete_dns_record : {0}'.format(delete_dns_record))
    #     for line in pprint.pformat(dir()).splitlines():
    #         log.debug('delete context: {0}'.format(line))

    salt.utils.cloud.fire_event('event', 'destroyed instance',
                                'salt/cloud/{0}/destroyed'.format(name),
                                {'name': name},
                                transport=__opts__['transport'])

    # Drop the minion from the salt-cloud cache so list_nodes stays accurate.
    if __opts__.get('update_cachedir', False) is True:
        salt.utils.cloud.delete_minion_cachedir(
            name, __active_provider_name__.split(':')[0], __opts__)

    # Return the provider's response to the delete request.
    return node
def create(vm_):
    '''
    Create a single VM from a data dict.

    Requests the node from Joyent, polls until it is running and has an IP
    address, optionally runs the deploy/bootstrap script (or the Windows
    installer), and fires the standard salt-cloud lifecycle events.

    CLI Example:

    .. code-block:: bash

        salt-cloud -p profile_name vm_name

    Returns a dict of node data (plus 'deploy_kwargs' when a deployment was
    performed), or False if the initial creation request failed.
    '''
    key_filename = config.get_cloud_config_value('private_key', vm_,
                                                 __opts__,
                                                 search_global=False,
                                                 default=None)

    salt.utils.cloud.fire_event('event', 'starting create',
                                'salt/cloud/{0}/creating'.format(
                                    vm_['name']), {
                                        'name': vm_['name'],
                                        'profile': vm_['profile'],
                                        'provider': vm_['provider'],
                                    },
                                transport=__opts__['transport'])

    log.info('Creating Cloud VM {0} in {1}'.format(
        vm_['name'], vm_.get('location', DEFAULT_LOCATION)))

    ## added . for fqdn hostnames
    salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9-.')
    kwargs = {
        'name': vm_['name'],
        'image': get_image(vm_),
        'size': get_size(vm_),
        'location': vm_.get('location', DEFAULT_LOCATION)
    }

    salt.utils.cloud.fire_event('event', 'requesting instance',
                                'salt/cloud/{0}/requesting'.format(
                                    vm_['name']), {'kwargs': kwargs},
                                transport=__opts__['transport'])

    try:
        data = create_node(**kwargs)
    except Exception as exc:  # pylint: disable=W0703
        log.error(
            'Error creating {0} on JOYENT\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: \n{1}'.format(vm_['name'],
                                                       str(exc)),
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG)
        return False

    ret = {}

    def __query_node_data(vm_id, vm_location):
        # Poll the machine until it is running and has at least one IP.
        rcode, data = query(command='my/machines/{0}'.format(vm_id),
                            method='GET',
                            location=vm_location)
        if rcode not in VALID_RESPONSE_CODES:
            # Trigger a wait for IP error
            return False
        if data['state'] != 'running':
            # Still not running, trigger another iteration
            return
        # BUG FIX: 'ips' is not always present in the API response (the
        # guarded 'ips' check on the create_node() result below shows it
        # can be absent); use .get() so a missing key triggers another
        # polling iteration instead of raising KeyError.
        ips = data.get('ips')
        if isinstance(ips, list) and len(ips) > 0:
            return data

    if 'ips' in data:
        if isinstance(data['ips'], list) and len(data['ips']) <= 0:
            log.info('New joyent asynchronous machine creation api detected...'
                     '\n\t\t-- please wait for IP addresses to be assigned...')
    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(data['id'], vm_.get('location', DEFAULT_LOCATION)),
            timeout=config.get_cloud_config_value('wait_for_ip_timeout',
                                                  vm_, __opts__,
                                                  default=5 * 60),
            interval=config.get_cloud_config_value('wait_for_ip_interval',
                                                   vm_, __opts__,
                                                   default=1),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    data = reformat_node(data)

    ssh_username = config.get_cloud_config_value('ssh_username', vm_,
                                                 __opts__, default='root')

    if config.get_cloud_config_value('deploy', vm_, __opts__) is True:
        # Choose the address matching the configured ssh interface.
        host = data['public_ips'][0]
        if ssh_interface(vm_) == 'private_ips':
            host = data['private_ips'][0]

        deploy_script = script(vm_)
        deploy_kwargs = {
            'opts': __opts__,
            'host': host,
            'username': ssh_username,
            'key_filename': key_filename,
            'script': deploy_script.script,
            'name': vm_['name'],
            'tmp_dir': config.get_cloud_config_value(
                'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'sudo': config.get_cloud_config_value(
                'sudo', vm_, __opts__, default=(ssh_username != 'root')),
            'sudo_password': config.get_cloud_config_value(
                'sudo_password', vm_, __opts__, default=None),
            'tty': config.get_cloud_config_value(
                'tty', vm_, __opts__, default=True),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__),
            'script_env': config.get_cloud_config_value(
                'script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_,
                                         __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Check for Windows install params
        win_installer = config.get_cloud_config_value('win_installer', vm_,
                                                      __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator')
            deploy_kwargs['password'] = config.get_cloud_config_value(
                'win_password', vm_, __opts__, default='')

        # Store what was used to the deploy the VM, minus any secrets.
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event('event', 'executing deploy script',
                                    'salt/cloud/{0}/deploying'.format(
                                        vm_['name']),
                                    {'kwargs': event_kwargs},
                                    transport=__opts__['transport'])

        deployed = False
        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error('Failed to start Salt on Cloud VM {0}'.format(
                vm_['name']))

    ret.update(data)

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event('event', 'created instance',
                                'salt/cloud/{0}/created'.format(
                                    vm_['name']), {
                                        'name': vm_['name'],
                                        'profile': vm_['profile'],
                                        'provider': vm_['provider'],
                                    },
                                transport=__opts__['transport'])

    return ret
def _salt(fun, *args, **kw):
    '''Execute a salt function on a specific minion

    Special kwargs:

            salt_target
                target to exec things on
            salt_timeout
                timeout for jobs
            salt_job_poll
                poll interval to wait for job finish result

    Results of functions listed in __CACHED_FUNS are cached per time
    window in the module-level __CACHED_CALLS dict.
    '''
    # Extract the control kwargs so they are not forwarded to the function.
    try:
        poll = kw.pop('salt_job_poll')
    except KeyError:
        poll = 0.1
    try:
        target = kw.pop('salt_target')
    except KeyError:
        target = None
    try:
        timeout = int(kw.pop('salt_timeout'))
    except (KeyError, ValueError):
        # use lower timeouts for very basic commands
        timeout = __FUN_TIMEOUT.get(
            fun,
            900  # wait up to 15 minutes for the default timeout
        )
    try:
        kwargs = kw.pop('kwargs')
    except KeyError:
        kwargs = {}
    if not target:
        # Fall back to the target configured on the provider.
        infos = get_configured_provider()
        if not infos:
            return
        target = infos['target']
    laps = time.time()
    cache = False
    if fun in __CACHED_FUNS:
        cache = True
        # Quantize the timestamp so cached results live one window long.
        laps = laps // __CACHED_FUNS[fun]
    # Serialize the arguments to build a hashable cache key; anything that
    # is not JSON-serializable degrades to an empty string.
    try:
        sargs = json.dumps(args)
    except TypeError:
        sargs = ''
    try:
        skw = json.dumps(kw)
    except TypeError:
        skw = ''
    try:
        skwargs = json.dumps(kwargs)
    except TypeError:
        skwargs = ''
    cache_key = (laps, target, fun, sargs, skw, skwargs)
    if not cache or (cache and (cache_key not in __CACHED_CALLS)):
        conn = _client()
        runner = _runner()
        rkwargs = kwargs.copy()
        rkwargs['timeout'] = timeout
        # NOTE(review): 'expr_form' looks like the pre-Nitrogen name of the
        # 'tgt_type' argument -- confirm against the Salt version in use.
        rkwargs.setdefault('expr_form', 'list')
        kwargs.setdefault('expr_form', 'list')
        ping_retries = 0
        # the target(s) have around one minute to respond: we send up to 60
        # ping requests, which prevents us from blindly sending commands to
        # unmatched minions
        ping_max_retries = 60
        ping = True
        # do not check ping... if we are pinging
        if fun == 'test.ping':
            ping_retries = ping_max_retries + 1
        # be sure that the executors are alive
        while ping_retries <= ping_max_retries:
            try:
                if ping_retries > 0:
                    time.sleep(1)
                pings = conn.cmd(tgt=target, timeout=10, fun='test.ping')
                values = list(pings.values())
                if not values:
                    ping = False
                for v in values:
                    if v is not True:
                        ping = False
                if not ping:
                    raise ValueError('Unreachable')
                break
            except Exception:
                ping = False
                ping_retries += 1
                log.error('{0} unreachable, retrying'.format(target))
        if not ping:
            raise SaltCloudSystemExit('Target {0} unreachable'.format(target))
        # Fire the real job asynchronously, then poll saltutil.find_job
        # until it is no longer running or the timeout elapses.
        jid = conn.cmd_async(tgt=target,
                             fun=fun,
                             arg=args,
                             kwarg=kw,
                             **rkwargs)
        cret = conn.cmd(tgt=target,
                        fun='saltutil.find_job',
                        arg=[jid],
                        timeout=10,
                        **kwargs)
        running = bool(cret.get(target, False))
        endto = time.time() + timeout
        while running:
            rkwargs = {
                'tgt': target,
                'fun': 'saltutil.find_job',
                'arg': [jid],
                'timeout': 10
            }
            cret = conn.cmd(**rkwargs)
            running = bool(cret.get(target, False))
            if not running:
                break
            if running and (time.time() > endto):
                raise Exception('Timeout {0}s for {1} is elapsed'.format(
                    timeout, pformat(rkwargs)))
            time.sleep(poll)
        # timeout for the master to return data about a specific job
        wait_for_res = float({
            'test.ping': '5',
        }.get(fun, '120'))
        # Poll the job cache (0.5s steps) for the finished job's result.
        while wait_for_res:
            wait_for_res -= 0.5
            cret = runner.cmd('jobs.lookup_jid', [jid, {'__kwarg__': True}])
            if target in cret:
                ret = cret[target]
                break
            # recent changes
            elif 'data' in cret and 'outputter' in cret:
                ret = cret['data']
                break
            # special case, some answers may be crafted
            # to handle the unresponsivness of a specific command
            # which is also meaningful, e.g. a minion not yet provisioned
            if fun in ['test.ping'] and not wait_for_res:
                ret = {
                    'test.ping': False,
                }.get(fun, False)
            time.sleep(0.5)
        # NOTE(review): if the loop above exhausts wait_for_res without any
        # branch assigning 'ret' (fun != 'test.ping'), 'ret' is unbound
        # here and the 'in' test raises NameError, not TypeError -- confirm
        # whether that path can occur in practice.
        try:
            # A string result containing this marker means the remote
            # module/function could not be loaded on the minion.
            if 'is not available.' in ret:
                raise SaltCloudSystemExit(
                    'module/function {0} is not available'.format(fun))
        except SaltCloudSystemExit:
            raise
        except TypeError:
            # 'ret' is not a string/container; no availability marker.
            pass
        if cache:
            __CACHED_CALLS[cache_key] = ret
    elif cache and cache_key in __CACHED_CALLS:
        # Cache hit: reuse the stored result for this time window.
        ret = __CACHED_CALLS[cache_key]
    return ret
def destroy(name, call=None):
    """
    Destroy a node.

    .. versionadded:: 2018.3.0

    Disconnect a minion from the master, and remove its keys.

    Optionally, (if ``remove_config_on_destroy`` is ``True``), disables
    salt-minion from running on the minion, and erases the Salt
    configuration files from it.

    Optionally, (if ``shutdown_on_destroy`` is ``True``), orders the minion
    to halt.

    CLI Example:

    .. code-block:: bash

        salt-cloud --destroy mymachine
    """
    # Only valid as an action (-d/--destroy/-a), never as --function.
    if call == "function":
        raise SaltCloudSystemExit(
            "The destroy action must be called with -d, --destroy, "
            "-a, or --action.")

    opts = __opts__

    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{}/destroying".format(name),
        args={"name": name},
        sock_dir=opts["sock_dir"],
        transport=opts["transport"],
    )

    vm_ = get_configured_provider()

    # All remote operations run over the master's LocalClient while the
    # minion is still connected; order matters (disable service first so a
    # restart cannot regenerate keys mid-teardown).
    with salt.client.LocalClient() as local:
        my_info = local.cmd(name, "grains.get", ["salt-cloud"])
        try:
            vm_.update(my_info[name])  # get profile name to get config value
        except (IndexError, TypeError):
            # Minion did not answer or returned no grain; keep provider
            # config only.
            pass
        if config.get_cloud_config_value("remove_config_on_destroy",
                                         vm_,
                                         opts,
                                         default=True):
            ret = local.cmd(
                name,  # prevent generating new keys on restart
                "service.disable",
                ["salt-minion"],
            )
            if ret and ret[name]:
                log.info("disabled salt-minion service on %s", name)
            # Ask the minion where its config file lives, then remove it.
            ret = local.cmd(name, "config.get", ["conf_file"])
            if ret and ret[name]:
                confile = ret[name]
                ret = local.cmd(name, "file.remove", [confile])
                if ret and ret[name]:
                    log.info("removed minion %s configuration file %s",
                             name, confile)
            # Same for the PKI directory holding the minion's keys.
            ret = local.cmd(name, "config.get", ["pki_dir"])
            if ret and ret[name]:
                pki_dir = ret[name]
                ret = local.cmd(name, "file.remove", [pki_dir])
                if ret and ret[name]:
                    log.info("removed minion %s key files in %s", name,
                             pki_dir)
        if config.get_cloud_config_value("shutdown_on_destroy",
                                         vm_,
                                         opts,
                                         default=False):
            # Halt the machine last, after config/keys are gone.
            ret = local.cmd(name, "system.shutdown")
            if ret and ret[name]:
                log.info("system.shutdown for minion %s successful", name)

    __utils__["cloud.fire_event"](
        "event",
        "destroyed instance",
        "salt/cloud/{}/destroyed".format(name),
        args={"name": name},
        sock_dir=opts["sock_dir"],
        transport=opts["transport"],
    )

    return {"Destroyed": "{} was destroyed.".format(name)}
def destroy(name, conn=None, call=None):
    '''
    Delete a single VM, and all of its volumes.

    name -- name of the VM to delete.
    conn -- optional existing provider connection; one is created if None.
    call -- must not be 'function'.

    Detaches and destroys every DATADISK volume before destroying the node
    itself, firing a salt-cloud event around each step.  Returns True on
    success, False on any failure.
    '''
    if call == 'function':
        raise SaltCloudSystemExit(
            'The destroy action must be called with -d, --destroy, '
            '-a or --action.')

    salt.utils.cloud.fire_event(
        'event',
        'destroying instance',
        'salt/cloud/{0}/destroying'.format(name),
        {'name': name},
    )

    if not conn:
        conn = get_conn()   # pylint: disable=E0602

    node = get_node(conn, name)
    if node is None:
        log.error('Unable to find the VM {0}'.format(name))
        # BUG FIX: previously fell through and crashed calling
        # conn.list_volumes(None); fail cleanly instead.
        return False
    volumes = conn.list_volumes(node)
    if volumes is None:
        log.error('Unable to find volumes of the VM {0}'.format(name))
        # BUG FIX: previously fell through and crashed iterating None.
        return False
    # TODO add an option like 'delete_sshkeys' below
    for volume in volumes:
        if volume.extra['volume_type'] != 'DATADISK':
            # Only data disks are detached/destroyed; the root disk goes
            # away with the node itself.
            log.info('Ignoring volume type {0}: {1}'.format(
                volume.extra['volume_type'], volume.name))
            continue

        log.info('Detaching volume: {0}'.format(volume.name))
        salt.utils.cloud.fire_event(
            'event',
            'detaching volume',
            'salt/cloud/{0}/detaching'.format(volume.name),
            {'name': volume.name},
        )
        if not conn.detach_volume(volume):
            log.error('Failed to Detach volume: {0}'.format(volume.name))
            return False
        log.info('Detached volume: {0}'.format(volume.name))
        salt.utils.cloud.fire_event(
            'event',
            'detached volume',
            'salt/cloud/{0}/detached'.format(volume.name),
            {'name': volume.name},
        )

        log.info('Destroying volume: {0}'.format(volume.name))
        salt.utils.cloud.fire_event(
            'event',
            'destroying volume',
            'salt/cloud/{0}/destroying'.format(volume.name),
            {'name': volume.name},
        )
        if not conn.destroy_volume(volume):
            log.error('Failed to Destroy volume: {0}'.format(volume.name))
            return False
        log.info('Destroyed volume: {0}'.format(volume.name))
        salt.utils.cloud.fire_event(
            'event',
            'destroyed volume',
            'salt/cloud/{0}/destroyed'.format(volume.name),
            {'name': volume.name},
        )

    log.info('Destroying VM: {0}'.format(name))
    ret = conn.destroy_node(node)
    if not ret:
        log.error('Failed to Destroy VM: {0}'.format(name))
        return False
    log.info('Destroyed VM: {0}'.format(name))

    # Fire destroy action.  (An unused 'event = salt.utils.event.SaltEvent'
    # local left over from an older event API was removed here.)
    salt.utils.cloud.fire_event(
        'event',
        'destroyed instance',
        'salt/cloud/{0}/destroyed'.format(name),
        {'name': name},
    )

    if __opts__['delete_sshkeys'] is True:
        salt.utils.cloud.remove_sshkey(node.public_ips[0])
    return True
def request_instance(vm_=None, call=None):
    '''
    Put together all of the information necessary to request an instance
    on Openstack and then fire off the request the instance.

    Returns data about the instance (the libcloud node object and the
    updated vm_ dict, as a tuple).
    '''
    if call == 'function':
        # Technically this function may be called other ways too, but it
        # definitely cannot be called with --function.
        raise SaltCloudSystemExit(
            'The request_instance action must be called with -a or --action.')
    salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-')
    conn = get_conn()
    kwargs = {'name': vm_['name']}

    # Resolve the image and size names into libcloud objects; either
    # failure is fatal for the request.
    try:
        kwargs['image'] = get_image(conn, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit('Error creating {0} on OPENSTACK\n\n'
                                  'Could not find image {1}: {2}\n'.format(
                                      vm_['name'], vm_['image'], exc))

    try:
        kwargs['size'] = get_size(conn, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit('Error creating {0} on OPENSTACK\n\n'
                                  'Could not find size {1}: {2}\n'.format(
                                      vm_['name'], vm_['size'], exc))

    # Note: This currently requires libcloud trunk
    avz = config.get_cloud_config_value('availability_zone',
                                        vm_,
                                        __opts__,
                                        default=None,
                                        search_global=False)
    if avz is not None:
        kwargs['ex_availability_zone'] = avz

    kwargs['ex_keyname'] = config.get_cloud_config_value('ssh_key_name',
                                                         vm_,
                                                         __opts__,
                                                         search_global=False)

    # Validate every requested security group against the ones the
    # provider actually knows about before passing them through.
    security_groups = config.get_cloud_config_value('security_groups',
                                                    vm_,
                                                    __opts__,
                                                    search_global=False)
    if security_groups is not None:
        vm_groups = security_groups.split(',')
        avail_groups = conn.ex_list_security_groups()
        group_list = []

        for vmg in vm_groups:
            if vmg in [ag.name for ag in avail_groups]:
                group_list.append(vmg)
            else:
                raise SaltCloudNotFound(
                    'No such security group: \'{0}\''.format(vmg))

        kwargs['ex_security_groups'] = [
            g for g in avail_groups if g.name in group_list
        ]

    networks = config.get_cloud_config_value('networks',
                                             vm_,
                                             __opts__,
                                             search_global=False)

    floating = []

    # HAS014: module flag indicating the installed libcloud supports the
    # OpenStack network / floating-IP-pool classes used below.
    if HAS014:
        if networks is not None:
            for net in networks:
                if 'fixed' in net:
                    kwargs['networks'] = [
                        OpenStackNetwork(n, None, None, None)
                        for n in net['fixed']
                    ]
                elif 'floating' in net:
                    # Collect unassigned floating IPs from the named pool.
                    pool = OpenStack_1_1_FloatingIpPool(
                        net['floating'], conn.connection)
                    for idx in pool.list_floating_ips():
                        if idx.node_id is None:
                            floating.append(idx)
                    if not floating:
                        # Note(pabelanger): We have no available floating
                        # IPs.  For now, we raise an exception and exit.
                        # A future enhancement might be to allow salt-cloud
                        # to dynamically allocate a new address instead.
                        raise SaltCloudSystemExit(
                            'Floating pool {0!r} does not have any more '
                            'please create some more or use a different '
                            'pool.'.format(net['floating']))
        # otherwise, attempt to obtain list without specifying pool
        # this is the same as 'nova floating-ip-list'
        elif ssh_interface(vm_) != 'private_ips':
            try:
                # This try/except is here because it appears some
                # *cough* Rackspace *cough*
                # OpenStack providers return a 404 Not Found for the
                # floating ip pool URL if there are no pools setup
                pool = OpenStack_1_1_FloatingIpPool('', conn.connection)
                for idx in pool.list_floating_ips():
                    if idx.node_id is None:
                        floating.append(idx)
                if not floating:
                    # Note(pabelanger): We have no available floating IPs.
                    # For now, we raise an exception and exit.
                    # A future enhancement might be to allow salt-cloud to
                    # dynamically allocate new address but that might be
                    # tricky to manage.
                    raise SaltCloudSystemExit(
                        'There are no more floating IP addresses '
                        'available, please create some more')
            except Exception as e:
                if str(e).startswith('404'):
                    # No pools configured on this provider; proceed
                    # without floating IPs.
                    pass
                else:
                    raise

    vm_['floating'] = floating

    # Inject files into the instance (libcloud ex_files personality).
    files = config.get_cloud_config_value('files',
                                          vm_,
                                          __opts__,
                                          search_global=False)
    if files:
        kwargs['ex_files'] = {}
        for src_path in files:
            with salt.utils.fopen(files[src_path], 'r') as fp_:
                kwargs['ex_files'][src_path] = fp_.read()

    userdata_file = config.get_cloud_config_value('userdata_file',
                                                  vm_,
                                                  __opts__,
                                                  search_global=False)

    if userdata_file is not None:
        with salt.utils.fopen(userdata_file, 'r') as fp:
            kwargs['ex_userdata'] = fp.read()

    kwargs['ex_config_drive'] = config.get_cloud_config_value(
        'config_drive', vm_, __opts__, search_global=False)

    salt.utils.cloud.fire_event('event', 'requesting instance',
                                'salt/cloud/{0}/requesting'.format(
                                    vm_['name']), {
                                        'kwargs': {
                                            'name': kwargs['name'],
                                            'image': kwargs['image'].name,
                                            'size': kwargs['size'].name,
                                            'profile': vm_['profile']
                                        }
                                    },
                                transport=__opts__['transport'])

    # Default the instance metadata to the profile name when one is set.
    default_profile = {}
    if 'profile' in vm_ and vm_['profile'] is not None:
        default_profile = {'profile': vm_['profile']}

    kwargs['ex_metadata'] = config.get_cloud_config_value(
        'metadata', vm_, __opts__, default=default_profile,
        search_global=False)
    if not isinstance(kwargs['ex_metadata'], dict):
        raise SaltCloudConfigError('\'metadata\' should be a dict.')

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        raise SaltCloudSystemExit(
            'Error creating {0} on OpenStack\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(vm_['name'], exc))

    # Some providers return an admin password in the node's extra data.
    vm_['password'] = data.extra.get('password', None)

    return data, vm_
def create(vm_):
    '''
    Create a single VM from a data dict (nova/openstack driver).

    Fires ``salt/cloud/<name>/creating``, requests (or adopts) the instance,
    polls until an IP address is available, bootstraps Salt onto it, and
    fires ``salt/cloud/<name>/created``.

    :param vm_: profile/VM definition dict; mutated in place (keys such as
        ``key_filename``, ``instance_id``, ``ssh_host``, ``salt_host`` are added).
    :return: dict of bootstrap results merged with the node's attributes,
        or ``False`` when the profile is mis-configured.
    :raises SaltCloudConfigError: if the configured ``ssh_key_file`` is missing.
    :raises SaltCloudSystemExit: on IP-wait timeout/failure or when no valid
        IP address could be determined.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__, __active_provider_name__ or 'nova',
                vm_['profile']) is False:
            return False
    except AttributeError:
        pass

    # NOTE(review): 'deploy' is looked up but not consumed in this function;
    # kept for backward compatibility with config validation side effects.
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)
    key_filename = config.get_cloud_config_value('ssh_key_file', vm_, __opts__,
                                                 search_global=False,
                                                 default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined ssh_key_file {0!r} does not exist'.format(
                key_filename))

    vm_['key_filename'] = key_filename

    # Since using "provider: <provider-engine>" is deprecated, alias provider
    # to use driver: "driver: <provider-engine>"
    if 'provider' in vm_:
        vm_['driver'] = vm_.pop('provider')

    salt.utils.cloud.fire_event('event', 'starting create',
                                'salt/cloud/{0}/creating'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['driver'],
                                },
                                transport=__opts__['transport'])
    conn = get_conn()

    if 'instance_id' in vm_:
        # This was probably created via another process, and doesn't have
        # things like salt keys created yet, so let's create them now.
        if 'pub_key' not in vm_ and 'priv_key' not in vm_:
            log.debug('Generating minion keys for {0[name]!r}'.format(vm_))
            # FIX: use the module-level `config` alias like every other call
            # in this function (was `salt.config.get_cloud_config_value`).
            vm_['priv_key'], vm_['pub_key'] = salt.utils.cloud.gen_keys(
                config.get_cloud_config_value('keysize', vm_, __opts__))
        data = conn.server_show_libcloud(vm_['instance_id'])
        if vm_['key_filename'] is None and \
                'change_password' in __opts__ and \
                __opts__['change_password'] is True:
            vm_['password'] = sup.secure_password()
            conn.root_password(vm_['instance_id'], vm_['password'])
    else:
        # Put together all of the information required to request the instance,
        # and then fire off the request for it
        data, vm_ = request_instance(vm_)

        # Pull the instance ID, valid for both spot and normal instances
        vm_['instance_id'] = data.id

    def __query_node_data(vm_, data):
        '''
        Poll helper for wait_for_ip: return node data once usable IPs are
        known, ``False`` on a hard query error, ``None`` to keep polling.
        '''
        try:
            node = show_instance(vm_['name'], 'action')
            log.debug('Loaded node data for {0}:\n{1}'.format(
                vm_['name'], pprint.pformat(node)))
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(err),
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG)
            # Trigger a failure in the wait for IP function
            return False

        running = node['state'] == 'ACTIVE'
        if not running:
            # Still not running, trigger another iteration
            return

        # FIX: default must be the boolean False. The original default was the
        # *string* 'False', which is truthy, so the rackconnectv3 branch below
        # ran unconditionally and incorrectly flagged every VM as rackconnect.
        rackconnectv3 = config.get_cloud_config_value('rackconnectv3', vm_,
                                                      __opts__,
                                                      default=False,
                                                      search_global=False)

        if rackconnectv3:
            # When configured, rackconnectv3 holds the network name to use.
            networkname = rackconnectv3
            for network in node['addresses'].get(networkname, []):
                # FIX: compare value, not identity ('is 4' only worked due to
                # CPython's small-int cache).
                if network['version'] == 4:
                    access_ip = network['addr']
                    break
            vm_['rackconnect'] = True

        if ssh_interface(vm_) in node['addresses']:
            networkname = ssh_interface(vm_)
            for network in node['addresses'].get(networkname, []):
                # FIX: value comparison instead of identity (see above).
                if network['version'] == 4:
                    node['extra']['access_ip'] = network['addr']
                    break
            vm_['cloudnetwork'] = True

        if rackconnect(vm_) is True:
            extra = node.get('extra', {})
            rc_status = extra.get('metadata', {}).get(
                'rackconnect_automation_status', '')
            access_ip = extra.get('access_ip', '')
            if rc_status != 'DEPLOYED' and not rackconnectv3:
                log.debug('Waiting for Rackconnect automation to complete')
                return

        if managedcloud(vm_) is True:
            extra = conn.server_show_libcloud(node['id']).extra
            mc_status = extra.get('metadata', {}).get(
                'rax_service_level_automation', '')
            if mc_status != 'Complete':
                log.debug('Waiting for managed cloud automation to complete')
                return

        result = []
        if 'private_ips' not in node and 'public_ips' not in node and \
                'access_ip' in node.get('extra', {}):
            result = [node['extra']['access_ip']]

        private = node.get('private_ips', [])
        public = node.get('public_ips', [])
        if private and not public:
            log.warning('Private IPs returned, but not public... Checking for '
                        'misidentified IPs')
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.utils.cloud.is_public_ip(private_ip):
                    log.warning('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                    log.warning('Public IP address was not ready when we last'
                                ' checked. Appending public IP address now.')
                    public = data.public_ips
                else:
                    log.warning('{0} is a private IP'.format(private_ip))
                    ignore_ip = ignore_cidr(vm_, private_ip)
                    if private_ip not in data.private_ips and not ignore_ip:
                        result.append(private_ip)

        if rackconnect(vm_) is True:
            if ssh_interface(vm_) != 'private_ips' or rackconnectv3:
                data.public_ips = access_ip
                return data

        if cloudnetwork(vm_) is True:
            data.public_ips = access_ip
            return data

        if result:
            log.debug('result = {0}'.format(result))
            data.private_ips = result
            if ssh_interface(vm_) == 'private_ips':
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != 'private_ips':
                return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    log.debug('VM is now running')

    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    elif rackconnect(vm_) is True and ssh_interface(vm_) != 'private_ips':
        ip_address = data.public_ips
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    if salt.utils.cloud.get_salt_interface(vm_, __opts__) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    elif rackconnect(vm_) is True and \
            salt.utils.cloud.get_salt_interface(vm_, __opts__) != 'private_ips':
        salt_ip_address = data.public_ips
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: {0}'.format(salt_ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('A valid IP address was not found')

    vm_['ssh_host'] = ip_address
    vm_['salt_host'] = salt_ip_address

    ret = salt.utils.cloud.bootstrap(vm_, __opts__)
    ret.update(data.__dict__)

    # Never leak the root password into the returned/event data.
    if 'password' in ret['extra']:
        del ret['extra']['password']

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event('event', 'created instance',
                                'salt/cloud/{0}/created'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['driver'],
                                },
                                transport=__opts__['transport'])
    return ret
def create(vm_):
    '''
    Create a single VM from a data dict (Proxmox driver).

    Fires the creating event, creates (or clones) the node via the Proxmox
    API, optionally reconfigures a cloned qemu VM (CPU/memory, cloud-init,
    ide/sata/scsi disks, net/ipconfig strings), starts it, determines an IP
    address (statically, from DNS, from the returned data, or via
    qemu-agent), and bootstraps Salt onto the new VM.

    CLI Example:

    .. code-block:: bash

        salt-cloud -p proxmox-ubuntu vmhostname
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(__opts__,
                                                           __active_provider_name__ or 'proxmox',
                                                           vm_['profile'],
                                                           vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    ret = {}

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )

    log.info('Creating Cloud VM %s', vm_['name'])

    # Optionally resolve the VM name via DNS to obtain an IP address up front.
    if 'use_dns' in vm_ and 'ip_address' not in vm_:
        use_dns = vm_['use_dns']
        if use_dns:
            from socket import gethostbyname, gaierror
            try:
                ip_address = gethostbyname(six.text_type(vm_['name']))
            except gaierror:
                # Resolution failure is non-fatal; other IP sources follow.
                log.debug('Resolving of %s failed', vm_['name'])
            else:
                vm_['ip_address'] = six.text_type(ip_address)

    try:
        newid = _get_next_vmid()
        data = create_node(vm_, newid)
    except Exception as exc:
        log.error(
            'Error creating %s on PROXMOX\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: \n%s',
            vm_['name'], exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return False

    ret['creation_data'] = data
    name = vm_['name']        # hostname which we know
    if 'clone' in vm_ and vm_['clone'] is True:
        vmid = newid
    else:
        vmid = data['vmid']       # vmid which we have received
    host = data['node']       # host which we have received
    nodeType = data['technology']     # VM tech (Qemu / OpenVZ)

    # Unless qemu-agent IP discovery is requested, pick an IP now.
    if 'agent_get_ip' not in vm_ or vm_['agent_get_ip'] == 0:
        # Determine which IP to use in order of preference:
        if 'ip_address' in vm_:
            ip_address = six.text_type(vm_['ip_address'])
        elif 'public_ips' in data:
            ip_address = six.text_type(data['public_ips'][0])  # first IP
        elif 'private_ips' in data:
            ip_address = six.text_type(data['private_ips'][0])  # first IP
        else:
            raise SaltCloudExecutionFailure("Could not determine an IP address to use")

    # wait until the vm has been created so we can start it
    if not wait_for_created(data['upid'], timeout=300):
        return {'Error': 'Unable to create {0}, command timed out'.format(name)}

    if 'clone' in vm_ and vm_['clone'] is True and vm_['technology'] == 'qemu':
        # If we cloned a machine, see if we need to reconfigure any of the options such as net0,
        # ide2, etc. This enables us to have a different cloud-init ISO mounted for each VM that's
        # brought up
        log.info('Configuring cloned VM')

        # Modify the settings for the VM one at a time so we can see any problems with the values
        # as quickly as possible
        for setting in 'sockets', 'cores', 'cpulimit', 'memory', 'onboot', 'agent':
            if setting in vm_:  # if the property is set, use it for the VM request
                postParams = {}
                postParams[setting] = vm_[setting]
                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)

        # cloud-init settings
        for setting in 'ciuser', 'cipassword', 'sshkeys', 'nameserver', 'searchdomain':
            if setting in vm_:  # if the property is set, use it for the VM request
                postParams = {}
                postParams[setting] = vm_[setting]
                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)

        # ide0..ide2: plain config posts, no move/resize handling.
        for setting_number in range(3):
            setting = 'ide{0}'.format(setting_number)
            if setting in vm_:
                postParams = {}
                postParams[setting] = vm_[setting]
                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)

        # sata0..sata4: if the disk already exists on the clone, move and/or
        # resize it to match the profile; otherwise post the setting as-is.
        for setting_number in range(5):
            setting = 'sata{0}'.format(setting_number)
            if setting in vm_:
                vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid))
                if setting in vm_config:
                    setting_params = vm_[setting]
                    setting_storage = setting_params.split(':')[0]
                    setting_size = _stringlist_to_dictionary(setting_params)['size']
                    vm_disk_params = vm_config[setting]
                    vm_disk_storage = vm_disk_params.split(':')[0]
                    vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size']
                    # if storage is different, move the disk
                    if setting_storage != vm_disk_storage:
                        postParams = {}
                        postParams['disk'] = setting
                        postParams['storage'] = setting_storage
                        postParams['delete'] = 1
                        node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format(
                            vm_['host'], vmid), postParams)
                        data = _parse_proxmox_upid(node, vm_)
                        # wait until the disk has been moved
                        if not wait_for_task(data['upid'], timeout=300):
                            return {'Error': 'Unable to move disk {0}, command timed out'.format(
                                setting)}
                    # if size is different, resize the disk
                    if setting_size != vm_disk_size:
                        postParams = {}
                        postParams['disk'] = setting
                        postParams['size'] = setting_size
                        query('put', 'nodes/{0}/qemu/{1}/resize'.format(
                            vm_['host'], vmid), postParams)
                else:
                    postParams = {}
                    postParams[setting] = vm_[setting]
                    query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)

        # scsi0..scsi12: same move/resize logic as the sata block above.
        for setting_number in range(13):
            setting = 'scsi{0}'.format(setting_number)
            if setting in vm_:
                vm_config = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid))
                if setting in vm_config:
                    setting_params = vm_[setting]
                    setting_storage = setting_params.split(':')[0]
                    setting_size = _stringlist_to_dictionary(setting_params)['size']
                    vm_disk_params = vm_config[setting]
                    vm_disk_storage = vm_disk_params.split(':')[0]
                    vm_disk_size = _stringlist_to_dictionary(vm_disk_params)['size']
                    # if storage is different, move the disk
                    if setting_storage != vm_disk_storage:
                        postParams = {}
                        postParams['disk'] = setting
                        postParams['storage'] = setting_storage
                        postParams['delete'] = 1
                        node = query('post', 'nodes/{0}/qemu/{1}/move_disk'.format(
                            vm_['host'], vmid), postParams)
                        data = _parse_proxmox_upid(node, vm_)
                        # wait until the disk has been moved
                        if not wait_for_task(data['upid'], timeout=300):
                            return {'Error': 'Unable to move disk {0}, command timed out'.format(
                                setting)}
                    # if size is different, resize the disk
                    if setting_size != vm_disk_size:
                        postParams = {}
                        postParams['disk'] = setting
                        postParams['size'] = setting_size
                        query('put', 'nodes/{0}/qemu/{1}/resize'.format(
                            vm_['host'], vmid), postParams)
                else:
                    postParams = {}
                    postParams[setting] = vm_[setting]
                    query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)

        # net strings are a list of comma seperated settings. We need to merge the settings so that
        # the setting in the profile only changes the settings it touches and the other settings
        # are left alone. An example of why this is necessary is because the MAC address is set
        # in here and generally you don't want to alter or have to know the MAC address of the new
        # instance, but you may want to set the VLAN bridge for example
        for setting_number in range(20):
            setting = 'net{0}'.format(setting_number)
            if setting in vm_:
                data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid))

                # Generate a dictionary of settings from the existing string
                new_setting = {}
                if setting in data:
                    new_setting.update(_stringlist_to_dictionary(data[setting]))

                # Merge the new settings (as a dictionary) into the existing dictionary to get the
                # new merged settings
                new_setting.update(_stringlist_to_dictionary(vm_[setting]))

                # Convert the dictionary back into a string list
                postParams = {setting: _dictionary_to_stringlist(new_setting)}
                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)

        # ipconfig0..ipconfig19: merged like net strings; ipconfig0 is special
        # because a configured ip_address overrides the profile's 'ip=' value.
        for setting_number in range(20):
            setting = 'ipconfig{0}'.format(setting_number)
            if setting in vm_:
                data = query('get', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid))

                # Generate a dictionary of settings from the existing string
                new_setting = {}
                if setting in data:
                    new_setting.update(_stringlist_to_dictionary(data[setting]))

                # Merge the new settings (as a dictionary) into the existing dictionary to get the
                # new merged settings
                if setting_number == 0 and 'ip_address' in vm_:
                    # NOTE(review): a /24 netmask is assumed here when the
                    # profile supplies ip_address — confirm this matches the
                    # target network.
                    if 'gw' in _stringlist_to_dictionary(vm_[setting]):
                        new_setting.update(_stringlist_to_dictionary(
                            'ip={0}/24,gw={1}'.format(
                                vm_['ip_address'],
                                _stringlist_to_dictionary(vm_[setting])['gw'])))
                    else:
                        new_setting.update(
                            _stringlist_to_dictionary('ip={0}/24'.format(vm_['ip_address'])))
                else:
                    new_setting.update(_stringlist_to_dictionary(vm_[setting]))

                # Convert the dictionary back into a string list
                postParams = {setting: _dictionary_to_stringlist(new_setting)}
                query('post', 'nodes/{0}/qemu/{1}/config'.format(vm_['host'], vmid), postParams)

    # VM has been created. Starting..
    if not start(name, vmid, call='action'):
        log.error('Node %s (%s) failed to start!', name, vmid)
        raise SaltCloudExecutionFailure

    # Wait until the VM has fully started
    log.debug('Waiting for state "running" for vm %s on %s', vmid, host)
    if not wait_for_state(vmid, 'running'):
        return {'Error': 'Unable to start {0}, command timed out'.format(name)}

    # For QEMU VMs, we can get the IP Address from qemu-agent
    if 'agent_get_ip' in vm_ and vm_['agent_get_ip'] == 1:
        def __find_agent_ip(vm_):
            '''
            Poll the qemu guest agent for the first IPv4 address on an
            eth*/ens* interface; raises until the agent answers.
            '''
            log.debug("Waiting for qemu-agent to start...")
            endpoint = 'nodes/{0}/qemu/{1}/agent/network-get-interfaces'.format(vm_['host'], vmid)
            interfaces = query('get', endpoint)
            # If we get a result from the agent, parse it
            if 'result' in interfaces:
                for interface in interfaces['result']:
                    if_name = interface['name']
                    # Only check ethernet type interfaces, as they are not returned in any order
                    if if_name.startswith('eth') or if_name.startswith('ens'):
                        for if_addr in interface['ip-addresses']:
                            ip_addr = if_addr['ip-address']
                            # Ensure interface has a valid IPv4 address
                            if if_addr['ip-address-type'] == 'ipv4' and ip_addr is not None:
                                return six.text_type(ip_addr)
            raise SaltCloudExecutionFailure

        # We have to wait for a bit for qemu-agent to start
        try:
            ip_address = __utils__['cloud.wait_for_fun'](
                __find_agent_ip,
                vm_=vm_
            )
        except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
            try:
                # If VM was created but we can't connect, destroy it.
                destroy(vm_['name'])
            except SaltCloudSystemExit:
                pass
            finally:
                raise SaltCloudSystemExit(six.text_type(exc))

    log.debug('Using IP address %s', ip_address)

    ssh_username = config.get_cloud_config_value(
        'ssh_username', vm_, __opts__, default='root'
    )
    ssh_password = config.get_cloud_config_value(
        'password', vm_, __opts__,
    )

    ret['ip_address'] = ip_address
    ret['username'] = ssh_username
    ret['password'] = ssh_password

    vm_['ssh_host'] = ip_address
    vm_['password'] = ssh_password
    # NOTE(review): bootstrap's return replaces the ret built above, so
    # ip_address/username/password collected so far are discarded — confirm
    # this is intended.
    ret = __utils__['cloud.bootstrap'](vm_, __opts__)

    # Report success!
    log.info('Created Cloud VM \'%s\'', vm_['name'])
    log.debug(
        '\'%s\' VM creation details:\n%s',
        vm_['name'], pprint.pformat(data)
    )

    __utils__['cloud.fire_event'](
        'event',
        'created instance',
        'salt/cloud/{0}/created'.format(vm_['name']),
        args=__utils__['cloud.filter_event'](
            'created', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
    )

    return ret
def request_instance(vm_=None, call=None):
    '''
    Put together all of the information necessary to request an instance
    on Openstack and then fire off the request the instance.

    Returns data about the instance
    '''
    if call == 'function':
        # Technically this function may be called other ways too, but it
        # definitely cannot be called with --function.
        raise SaltCloudSystemExit(
            'The request_instance action must be called with -a or --action.'
        )

    salt.utils.cloud.check_name(vm_['name'], 'a-zA-Z0-9._-')

    client = get_conn()
    request_kwargs = {'name': vm_['name']}

    # Resolve image first, then size — each failure message names the
    # missing resource.
    try:
        request_kwargs['image'] = get_image(client, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit(
            'Error creating {0} on OPENSTACK\n\n'
            'Could not find image {1}: {2}\n'.format(
                vm_['name'], vm_['image'], exc))

    try:
        request_kwargs['size'] = get_size(client, vm_)
    except Exception as exc:
        raise SaltCloudSystemExit(
            'Error creating {0} on OPENSTACK\n\n'
            'Could not find size {1}: {2}\n'.format(
                vm_['name'], vm_['size'], exc))

    # Note: This currently requires libcloud trunk
    avz = config.get_cloud_config_value(
        'availability_zone', vm_, __opts__, default=None, search_global=False)
    if avz is not None:
        request_kwargs['ex_availability_zone'] = avz

    request_kwargs['ex_keyname'] = config.get_cloud_config_value(
        'ssh_key_name', vm_, __opts__, search_global=False)

    # Validate every requested security group against those the provider
    # actually has before passing the group objects along.
    security_groups = config.get_cloud_config_value(
        'security_groups', vm_, __opts__, search_global=False)
    if security_groups is not None:
        requested = security_groups.split(',')
        available = client.ex_list_security_groups()
        known_names = {group.name for group in available}
        for wanted in requested:
            if wanted not in known_names:
                raise SaltCloudNotFound(
                    'No such security group: \'{0}\''.format(wanted))
        request_kwargs['ex_security_groups'] = [
            group for group in available if group.name in requested
        ]

    floating = _assign_floating_ips(vm_, client, request_kwargs)
    vm_['floating'] = floating

    # Inject any configured files into the instance.
    files = config.get_cloud_config_value(
        'files', vm_, __opts__, search_global=False)
    if files:
        request_kwargs['ex_files'] = {}
        for dst_path in files:
            with salt.utils.fopen(files[dst_path], 'r') as file_handle:
                request_kwargs['ex_files'][dst_path] = file_handle.read()

    userdata_file = config.get_cloud_config_value(
        'userdata_file', vm_, __opts__, search_global=False)
    if userdata_file is not None:
        with salt.utils.fopen(userdata_file, 'r') as userdata_handle:
            request_kwargs['ex_userdata'] = userdata_handle.read()

    config_drive = config.get_cloud_config_value(
        'config_drive', vm_, __opts__, default=None, search_global=False)
    if config_drive is not None:
        request_kwargs['ex_config_drive'] = config_drive

    salt.utils.cloud.fire_event(
        'event',
        'requesting instance',
        'salt/cloud/{0}/requesting'.format(vm_['name']),
        {
            'kwargs': {
                'name': request_kwargs['name'],
                'image': request_kwargs['image'].name,
                'size': request_kwargs['size'].name,
                'profile': vm_['profile'],
            }
        },
        transport=__opts__['transport'])

    # Default the instance metadata to the profile name when one is set.
    default_profile = {}
    if 'profile' in vm_ and vm_['profile'] is not None:
        default_profile = {'profile': vm_['profile']}

    request_kwargs['ex_metadata'] = config.get_cloud_config_value(
        'metadata', vm_, __opts__, default=default_profile,
        search_global=False)
    if not isinstance(request_kwargs['ex_metadata'], dict):
        raise SaltCloudConfigError('\'metadata\' should be a dict.')

    try:
        data = client.create_node(**request_kwargs)
    except Exception as exc:
        raise SaltCloudSystemExit(
            'Error creating {0} on OpenStack\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: {1}\n'.format(vm_['name'], exc))

    vm_['password'] = data.extra.get('password', None)

    return data, vm_
def create(vm_):
    '''
    Create a single VM from a data dict (Rackspace/libcloud driver).

    Fires the creating/requesting events, creates the node through libcloud,
    polls until the node is RUNNING and has a usable IP, then (when
    ``deploy`` is enabled) runs the Salt deploy script (or the Windows
    installer) over SSH and fires the deploying/created events.

    :param vm_: profile/VM definition dict.
    :return: dict of deploy details merged with the node's attributes, or
        ``False`` if the initial create_node call failed.
    :raises SaltCloudSystemExit: on IP-wait timeout/failure or when no IP
        address could be found.
    '''
    deploy = config.get_cloud_config_value('deploy', vm_, __opts__)

    salt.utils.cloud.fire_event('event', 'starting create',
                                'salt/cloud/{0}/creating'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['provider'],
                                },
                                transport=__opts__['transport'])

    log.info('Creating Cloud VM {0}'.format(vm_['name']))
    conn = get_conn()
    kwargs = {
        'name': vm_['name'],
        'image': get_image(conn, vm_),
        'size': get_size(conn, vm_)
    }

    salt.utils.cloud.fire_event('event', 'requesting instance',
                                'salt/cloud/{0}/requesting'.format(
                                    vm_['name']),
                                {
                                    'kwargs': {
                                        'name': kwargs['name'],
                                        'image': kwargs['image'].name,
                                        'size': kwargs['size'].name
                                    }
                                },
                                transport=__opts__['transport'])

    try:
        data = conn.create_node(**kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on RACKSPACE\n\n'
            'The following exception was thrown by libcloud when trying to '
            'run the initial deployment: \n{1}'.format(vm_['name'], exc),
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG)
        return False

    def __query_node_data(vm_, data):
        '''
        Poll helper for wait_for_ip: returns node data once the requested
        interface has IPs, ``False`` on a hard error, ``None`` to retry.
        '''
        running = False
        try:
            node = show_instance(vm_['name'], 'action')
            running = (node['state'] == NodeState.RUNNING)
            log.debug(
                'Loaded node data for {0}:\nname: {1}\nstate: {2}'.format(
                    vm_['name'], pprint.pformat(node['name']),
                    node['state']))
        except Exception as err:
            log.error(
                'Failed to get nodes list: {0}'.format(err),
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG)
            # Trigger a failure in the wait for IP function
            return False

        if not running:
            # Still not running, trigger another iteration
            return

        private = node['private_ips']
        public = node['public_ips']

        if private and not public:
            # Some providers misreport public addresses as private;
            # re-classify each one before giving up on public IPs.
            log.warn('Private IPs returned, but not public... Checking for '
                     'misidentified IPs')
            for private_ip in private:
                private_ip = preferred_ip(vm_, [private_ip])
                if salt.utils.cloud.is_public_ip(private_ip):
                    log.warn('{0} is a public IP'.format(private_ip))
                    data.public_ips.append(private_ip)
                else:
                    log.warn('{0} is a private IP'.format(private_ip))
                    if private_ip not in data.private_ips:
                        data.private_ips.append(private_ip)

            if ssh_interface(vm_) == 'private_ips' and data.private_ips:
                return data

        if private:
            data.private_ips = private
            if ssh_interface(vm_) == 'private_ips':
                return data

        if public:
            data.public_ips = public
            if ssh_interface(vm_) != 'private_ips':
                return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=25 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    log.debug('VM is now running')

    # Choose the address used for the SSH connection.
    if ssh_interface(vm_) == 'private_ips':
        ip_address = preferred_ip(vm_, data.private_ips)
    else:
        ip_address = preferred_ip(vm_, data.public_ips)
    log.debug('Using IP address {0}'.format(ip_address))

    # Choose the address the minion will use to reach the master.
    if get_salt_interface(vm_) == 'private_ips':
        salt_ip_address = preferred_ip(vm_, data.private_ips)
        log.info('Salt interface set to: {0}'.format(salt_ip_address))
    else:
        salt_ip_address = preferred_ip(vm_, data.public_ips)
        log.debug('Salt interface set to: {0}'.format(salt_ip_address))

    if not ip_address:
        raise SaltCloudSystemExit('No IP addresses could be found.')

    ssh_username = config.get_cloud_config_value('ssh_username', vm_,
                                                 __opts__, default='root')

    ret = {}
    if deploy is True:
        # Assemble everything the generic deploy machinery needs: connection
        # details, minion keys/config, and script invocation options.
        deploy_script = script(vm_)
        deploy_kwargs = {
            'opts': __opts__,
            'host': ip_address,
            'salt_host': salt_ip_address,
            'username': ssh_username,
            'password': data.extra['password'],
            'script': deploy_script.script,
            'name': vm_['name'],
            'tmp_dir': config.get_cloud_config_value(
                'tmp_dir', vm_, __opts__, default='/tmp/.saltcloud'),
            'deploy_command': config.get_cloud_config_value(
                'deploy_command', vm_, __opts__,
                default='/tmp/.saltcloud/deploy.sh',
            ),
            'start_action': __opts__['start_action'],
            'parallel': __opts__['parallel'],
            'sock_dir': __opts__['sock_dir'],
            'conf_file': __opts__['conf_file'],
            'minion_pem': vm_['priv_key'],
            'minion_pub': vm_['pub_key'],
            'keep_tmp': __opts__['keep_tmp'],
            'preseed_minion_keys': vm_.get('preseed_minion_keys', None),
            'sudo': config.get_cloud_config_value(
                'sudo', vm_, __opts__, default=(ssh_username != 'root')),
            'sudo_password': config.get_cloud_config_value(
                'sudo_password', vm_, __opts__, default=None),
            'tty': config.get_cloud_config_value(
                'tty', vm_, __opts__, default=False),
            'display_ssh_output': config.get_cloud_config_value(
                'display_ssh_output', vm_, __opts__, default=True),
            'script_args': config.get_cloud_config_value(
                'script_args', vm_, __opts__),
            'script_env': config.get_cloud_config_value(
                'script_env', vm_, __opts__),
            'minion_conf': salt.utils.cloud.minion_config(__opts__, vm_)
        }

        # Deploy salt-master files, if necessary
        if config.get_cloud_config_value('make_master', vm_, __opts__) is True:
            deploy_kwargs['make_master'] = True
            deploy_kwargs['master_pub'] = vm_['master_pub']
            deploy_kwargs['master_pem'] = vm_['master_pem']
            master_conf = salt.utils.cloud.master_config(__opts__, vm_)
            deploy_kwargs['master_conf'] = master_conf

            if master_conf.get('syndic_master', None):
                deploy_kwargs['make_syndic'] = True

        deploy_kwargs['make_minion'] = config.get_cloud_config_value(
            'make_minion', vm_, __opts__, default=True)

        # Check for Windows install params
        win_installer = config.get_cloud_config_value('win_installer', vm_,
                                                      __opts__)
        if win_installer:
            deploy_kwargs['win_installer'] = win_installer
            minion = salt.utils.cloud.minion_config(__opts__, vm_)
            deploy_kwargs['master'] = minion['master']
            deploy_kwargs['username'] = config.get_cloud_config_value(
                'win_username', vm_, __opts__, default='Administrator')
            win_pass = config.get_cloud_config_value('win_password', vm_,
                                                     __opts__, default='')
            if win_pass:
                deploy_kwargs['password'] = win_pass

        # Store what was used to the deploy the VM
        # (deep copy with all secrets stripped before it hits the event bus)
        event_kwargs = copy.deepcopy(deploy_kwargs)
        del event_kwargs['minion_pem']
        del event_kwargs['minion_pub']
        del event_kwargs['sudo_password']
        if 'password' in event_kwargs:
            del event_kwargs['password']
        ret['deploy_kwargs'] = event_kwargs

        salt.utils.cloud.fire_event('event', 'executing deploy script',
                                    'salt/cloud/{0}/deploying'.format(
                                        vm_['name']),
                                    {'kwargs': event_kwargs},
                                    transport=__opts__['transport'])

        if win_installer:
            deployed = salt.utils.cloud.deploy_windows(**deploy_kwargs)
        else:
            deployed = salt.utils.cloud.deploy_script(**deploy_kwargs)

        if deployed:
            log.info('Salt installed on {0}'.format(vm_['name']))
        else:
            log.error('Failed to deploy and start Salt on Cloud VM {0}'.format(
                vm_['name']))

    ret.update(data.__dict__)

    # Never leak the admin password into logs or the return data.
    if 'password' in data.extra:
        del data.extra['password']

    log.info('Created Cloud VM {0[name]!r}'.format(vm_))
    log.debug('{0[name]!r} VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data.__dict__)))

    salt.utils.cloud.fire_event('event', 'created instance',
                                'salt/cloud/{0}/created'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['provider'],
                                },
                                transport=__opts__['transport'])

    return ret
def create(vm_):
    '''
    Provision a single machine (libvirt driver).

    Clones ``base_domain`` by rewriting its domain XML (new name, fresh MACs,
    cloned disk volumes), defines and boots the clone, waits for an IP
    address (via DHCP-lease learning or the qemu guest agent), then
    bootstraps Salt onto it. Any volumes/domains created before a failure
    are cleaned up in the except handler.

    :param vm_: profile/VM definition dict; must contain ``name``,
        ``base_domain`` and ``url``.
    :return: bootstrap result dict, or ``False`` if the profile is
        mis-configured.
    :raises SaltCloudSystemExit: on invalid clone_strategy/ip_source values.
    :raises SaltCloudExecutionFailure: on unsupported disk drivers/types.
    '''
    # clone_strategy: 'quick' = qcow2 backing-store overlay, 'full' = copy.
    clone_strategy = vm_.get('clone_strategy') or 'full'

    if clone_strategy not in set(['quick', 'full']):
        raise SaltCloudSystemExit(
            "'clone_strategy' must be one of quick or full. Got '{0}'".format(
                clone_strategy))

    ip_source = vm_.get('ip_source') or 'ip-learning'

    if ip_source not in set(['ip-learning', 'qemu-agent']):
        raise SaltCloudSystemExit(
            "'ip_source' must be one of qemu-agent or ip-learning. Got '{0}'".
            format(ip_source))

    # Explicit False must be honoured, hence the is-not-None dance.
    validate_xml = vm_.get('validate_xml') if vm_.get(
        'validate_xml') is not None else True

    log.info("Cloning '{0}' with strategy '{1}' validate_xml='{2}'".format(
        vm_['name'], clone_strategy, validate_xml))

    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__, __active_provider_name__ or 'libvirt',
                vm_['profile']) is False:
            return False
    except AttributeError:
        pass

    # TODO: check name qemu/libvirt will choke on some characters (like '/')?
    name = vm_['name']

    __utils__['cloud.fire_event'](
        'event',
        'starting create',
        'salt/cloud/{0}/creating'.format(name),
        args=__utils__['cloud.filter_event'](
            'creating', vm_, ['name', 'profile', 'provider', 'driver']),
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport'])

    key_filename = config.get_cloud_config_value('private_key', vm_, __opts__,
                                                 search_global=False,
                                                 default=None)
    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename \'{0}\' does not exist'.format(
                key_filename))
    vm_['key_filename'] = key_filename
    # wait_for_instance requires private_key
    vm_['private_key'] = key_filename

    # Records of everything we create so the except handler can undo it.
    cleanup = []
    try:
        # clone the vm
        base = vm_['base_domain']
        conn = __get_conn(vm_['url'])

        try:
            # for idempotency the salt-bootstrap needs -F argument
            #  script_args: -F
            # If the clone already exists, skip straight to the IP wait.
            clone_domain = conn.lookupByName(name)
        except libvirtError as e:
            domain = conn.lookupByName(base)
            # TODO: ensure base is shut down before cloning
            xml = domain.XMLDesc(0)

            kwargs = {
                'name': name,
                'base_domain': base,
            }

            __utils__['cloud.fire_event'](
                'event',
                'requesting instance',
                'salt/cloud/{0}/requesting'.format(name),
                args={
                    'kwargs': __utils__['cloud.filter_event'](
                        'requesting', kwargs, list(kwargs)),
                },
                sock_dir=__opts__['sock_dir'],
                transport=__opts__['transport'])

            log.debug("Source machine XML '{0}'".format(xml))

            # Rewrite the base domain's XML: new name/description, drop the
            # UUID and MAC addresses so libvirt generates fresh ones.
            domain_xml = ElementTree.fromstring(xml)
            domain_xml.find('./name').text = name
            if domain_xml.find('./description') is None:
                description_elem = ElementTree.Element('description')
                domain_xml.insert(0, description_elem)
            description = domain_xml.find('./description')
            description.text = "Cloned from {0}".format(base)
            domain_xml.remove(domain_xml.find('./uuid'))

            for iface_xml in domain_xml.findall('./devices/interface'):
                iface_xml.remove(iface_xml.find('./mac'))
                # enable IP learning, this might be a default behaviour...
                if iface_xml.find(
                        "./filterref/parameter[@name='CTRL_IP_LEARNING']"
                ) is None:
                    iface_xml.append(ElementTree.fromstring(IP_LEARNING_XML))

            # If a qemu agent is defined we need to fix the path to its socket
            # <channel type='unix'>
            #   <source mode='bind' path='/var/lib/libvirt/qemu/channel/target/domain-<dom-name>/org.qemu.guest_agent.0'/>
            #   <target type='virtio' name='org.qemu.guest_agent.0'/>
            #   <address type='virtio-serial' controller='0' bus='0' port='2'/>
            # </channel>
            for agent_xml in domain_xml.findall(
                    """./devices/channel[@type='unix']"""):
                # is org.qemu.guest_agent.0 an option?
                if agent_xml.find(
                        """./target[@type='virtio'][@name='org.qemu.guest_agent.0']"""
                ) is not None:
                    source_element = agent_xml.find(
                        """./source[@mode='bind']""")
                    # see if there is a path element that needs rewriting
                    if source_element and 'path' in source_element.attrib:
                        path = source_element.attrib['path']
                        new_path = path.replace('/domain-{0}/'.format(base),
                                                '/domain-{0}/'.format(name))
                        log.debug("Rewriting agent socket path to {0}".format(
                            new_path))
                        source_element.attrib['path'] = new_path

            # Clone each file-backed disk into a new volume and point the
            # clone's XML at the new volume path.
            for disk in domain_xml.findall(
                    """./devices/disk[@device='disk'][@type='file']"""):
                # print "Disk: ", ElementTree.tostring(disk)
                # check if we can clone
                driver = disk.find("./driver[@name='qemu']")
                if driver is None:
                    # Err on the safe side
                    raise SaltCloudExecutionFailure(
                        "Non qemu driver disk encountered bailing out.")
                disk_type = driver.attrib.get('type')
                log.info("disk attributes {0}".format(disk.attrib))
                if disk_type == 'qcow2':
                    source = disk.find("./source").attrib['file']
                    pool, volume = find_pool_and_volume(conn, source)
                    if clone_strategy == 'quick':
                        # Thin clone: new volume backed by the original.
                        new_volume = pool.createXML(
                            create_volume_with_backing_store_xml(volume), 0)
                    else:
                        # Full copy of the volume contents.
                        new_volume = pool.createXMLFrom(
                            create_volume_xml(volume), volume, 0)
                    cleanup.append({'what': 'volume', 'item': new_volume})

                    disk.find("./source").attrib['file'] = new_volume.path()
                elif disk_type == 'raw':
                    source = disk.find("./source").attrib['file']
                    pool, volume = find_pool_and_volume(conn, source)
                    # TODO: more control on the cloned disk type
                    new_volume = pool.createXMLFrom(create_volume_xml(volume),
                                                    volume, 0)
                    cleanup.append({'what': 'volume', 'item': new_volume})

                    disk.find("./source").attrib['file'] = new_volume.path()
                else:
                    raise SaltCloudExecutionFailure(
                        "Disk type '{0}' not supported".format(disk_type))

            clone_xml = ElementTree.tostring(domain_xml)
            log.debug("Clone XML '{0}'".format(clone_xml))

            validate_flags = libvirt.VIR_DOMAIN_DEFINE_VALIDATE if validate_xml else 0
            clone_domain = conn.defineXMLFlags(clone_xml, validate_flags)

            cleanup.append({'what': 'domain', 'item': clone_domain})
            clone_domain.createWithFlags(libvirt.VIR_DOMAIN_START_FORCE_BOOT)

        log.debug("VM '{0}'".format(vm_))

        # Translate the config value into the libvirt address-source enum.
        if ip_source == 'qemu-agent':
            ip_source = libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_AGENT
        elif ip_source == 'ip-learning':
            ip_source = libvirt.VIR_DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE

        address = salt.utils.cloud.wait_for_ip(
            get_domain_ip,
            update_args=(clone_domain, 0, ip_source),
            timeout=config.get_cloud_config_value(
                'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
            interval=config.get_cloud_config_value(
                'wait_for_ip_interval', vm_, __opts__, default=10),
            interval_multiplier=config.get_cloud_config_value(
                'wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
        )

        log.info('Address = {0}'.format(address))

        vm_['ssh_host'] = address

        # the bootstrap script needs to be installed first in /etc/salt/cloud.deploy.d/
        # salt-cloud -u is your friend
        ret = __utils__['cloud.bootstrap'](vm_, __opts__)

        __utils__['cloud.fire_event'](
            'event',
            'created instance',
            'salt/cloud/{0}/created'.format(name),
            args=__utils__['cloud.filter_event'](
                'created', vm_, ['name', 'profile', 'provider', 'driver']),
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport'])

        return ret
    except Exception as e:  # pylint: disable=broad-except
        # Try to clean up in as much cases as possible
        log.info('Cleaning up after exception clean up items: {0}'.format(
            cleanup))
        for leftover in cleanup:
            what = leftover['what']
            item = leftover['item']
            if what == 'domain':
                destroy_domain(conn, item)
            if what == 'volume':
                item.delete()
        raise e
def create(vm_):
    '''
    Create a single DigitalOcean droplet from a data dict.

    Validates profile/ssh-key configuration, fires the salt-cloud lifecycle
    events, requests the droplet, waits for a public IPv4 address, optionally
    writes DNS records, then bootstraps the minion over SSH.

    :param vm_: profile/VM data dict (must contain 'name'; 'profile',
        'driver'/'provider' and ssh key settings are read from it).
    :return: bootstrap result dict merged with the node data, or ``False``
        when the profile is misconfigured or the API request fails.
    :raises SaltCloudConfigError: on bad key/DNS configuration.
    :raises SaltCloudSystemExit: when no usable IP address is found or the
        wait-for-ip loop times out/fails.
    '''
    try:
        # Check for required profile parameters before sending any API calls.
        if vm_['profile'] and config.is_profile_configured(
                __opts__,
                __active_provider_name__ or 'digital_ocean',
                vm_['profile'],
                vm_=vm_) is False:
            return False
    except AttributeError:
        pass

    # Since using "provider: <provider-engine>" is deprecated, alias provider
    # to use driver: "driver: <provider-engine>"
    if 'provider' in vm_:
        vm_['driver'] = vm_.pop('provider')

    salt.utils.cloud.fire_event('event',
                                'starting create',
                                'salt/cloud/{0}/creating'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['driver'],
                                },
                                transport=__opts__['transport'])

    log.info('Creating Cloud VM {0}'.format(vm_['name']))

    # Base payload for the droplet-creation API call; ssh_keys is filled below.
    kwargs = {
        'name': vm_['name'],
        'size': get_size(vm_),
        'image': get_image(vm_),
        'region': get_location(vm_),
        'ssh_keys': []
    }

    # backwards compat: single 'ssh_key_name' option
    ssh_key_name = config.get_cloud_config_value('ssh_key_name',
                                                 vm_,
                                                 __opts__,
                                                 search_global=False)

    if ssh_key_name:
        kwargs['ssh_keys'].append(get_keyid(ssh_key_name))

    # comma-separated list of key names
    ssh_key_names = config.get_cloud_config_value('ssh_key_names',
                                                  vm_,
                                                  __opts__,
                                                  search_global=False,
                                                  default=False)

    if ssh_key_names:
        for key in ssh_key_names.split(','):
            kwargs['ssh_keys'].append(get_keyid(key))

    key_filename = config.get_cloud_config_value('ssh_key_file',
                                                 vm_,
                                                 __opts__,
                                                 search_global=False,
                                                 default=None)

    if key_filename is not None and not os.path.isfile(key_filename):
        raise SaltCloudConfigError(
            'The defined key_filename \'{0}\' does not exist'.format(
                key_filename))

    # A key file is mandatory: DigitalOcean does not return a root password.
    if key_filename is None:
        raise SaltCloudConfigError(
            'The DigitalOcean driver requires an ssh_key_file and an ssh_key_name '
            'because it does not supply a root password upon building the server.'
        )

    private_networking = config.get_cloud_config_value(
        'private_networking',
        vm_,
        __opts__,
        search_global=False,
        default=None,
    )

    if private_networking is not None:
        if not isinstance(private_networking, bool):
            raise SaltCloudConfigError(
                "'private_networking' should be a boolean value.")
        kwargs['private_networking'] = private_networking

    backups_enabled = config.get_cloud_config_value(
        'backups_enabled',
        vm_,
        __opts__,
        search_global=False,
        default=None,
    )

    if backups_enabled is not None:
        if not isinstance(backups_enabled, bool):
            raise SaltCloudConfigError(
                "'backups_enabled' should be a boolean value.")
        kwargs['backups'] = backups_enabled

    ipv6 = config.get_cloud_config_value(
        'ipv6',
        vm_,
        __opts__,
        search_global=False,
        default=None,
    )

    if ipv6 is not None:
        if not isinstance(ipv6, bool):
            raise SaltCloudConfigError("'ipv6' should be a boolean value.")
        kwargs['ipv6'] = ipv6

    create_dns_record = config.get_cloud_config_value(
        'create_dns_record',
        vm_,
        __opts__,
        search_global=False,
        default=None,
    )

    if create_dns_record:
        log.info('create_dns_record: will attempt to write DNS records')
        default_dns_domain = None
        dns_domain_name = vm_['name'].split('.')
        # An FQDN-looking minion name (>2 labels) yields hostname + domain.
        if len(dns_domain_name) > 2:
            log.debug(
                'create_dns_record: inferring default dns_hostname, dns_domain from minion name as FQDN'
            )
            default_dns_hostname = '.'.join(dns_domain_name[:-2])
            default_dns_domain = '.'.join(dns_domain_name[-2:])
        else:
            log.debug(
                "create_dns_record: can't infer dns_domain from {0}".format(
                    vm_['name']))
            default_dns_hostname = dns_domain_name[0]

        dns_hostname = config.get_cloud_config_value(
            'dns_hostname',
            vm_,
            __opts__,
            search_global=False,
            default=default_dns_hostname,
        )
        dns_domain = config.get_cloud_config_value(
            'dns_domain',
            vm_,
            __opts__,
            search_global=False,
            default=default_dns_domain,
        )
        if dns_hostname and dns_domain:
            log.info(
                'create_dns_record: using dns_hostname="{0}", dns_domain="{1}"'
                .format(dns_hostname, dns_domain))
            # Bound lambda used later when iterating discovered IP addresses;
            # only defined when create_dns_record is truthy.
            __add_dns_addr__ = lambda t, d: post_dns_record(
                dns_domain, dns_hostname,
                t, d)

            log.debug('create_dns_record: {0}'.format(__add_dns_addr__))
        else:
            log.error(
                'create_dns_record: could not determine dns_hostname and/or dns_domain'
            )
            raise SaltCloudConfigError(
                '\'create_dns_record\' must be a dict specifying "domain" '
                'and "hostname" or the minion name must be an FQDN.')

    salt.utils.cloud.fire_event('event',
                                'requesting instance',
                                'salt/cloud/{0}/requesting'.format(
                                    vm_['name']), {'kwargs': kwargs},
                                transport=__opts__['transport'])

    try:
        ret = create_node(kwargs)
    except Exception as exc:
        log.error(
            'Error creating {0} on DIGITAL_OCEAN\n\n'
            'The following exception was thrown when trying to '
            'run the initial deployment: {1}'.format(vm_['name'], str(exc)),
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG)
        return False

    def __query_node_data(vm_name):
        # Polled by wait_for_ip: returns node data once a public IPv4
        # address is present, False otherwise.
        data = show_instance(vm_name, 'action')
        if not data:
            # Trigger an error in the wait_for_ip function
            return False
        if data['networks'].get('v4'):
            for network in data['networks']['v4']:
                if network['type'] == 'public':
                    return data
        return False

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_['name'], ),
            timeout=config.get_cloud_config_value('wait_for_ip_timeout',
                                                  vm_,
                                                  __opts__,
                                                  default=10 * 60),
            interval=config.get_cloud_config_value('wait_for_ip_interval',
                                                   vm_,
                                                   __opts__,
                                                   default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_['name'])
        except SaltCloudSystemExit:
            pass
        finally:
            raise SaltCloudSystemExit(str(exc))

    if not vm_.get('ssh_host'):
        vm_['ssh_host'] = None

    # add DNS records, set ssh_host, default to first found IP, preferring IPv4 for ssh bootstrap script target
    addr_families, dns_arec_types = (('v4', 'v6'), ('A', 'AAAA'))
    arec_map = dict(list(zip(addr_families, dns_arec_types)))
    # v4 is iterated before v6, so an IPv4 public address wins for ssh_host.
    for facing, addr_family, ip_address in [
        (net['type'], family, net['ip_address'])
            for family in addr_families
            for net in data['networks'][family]
    ]:
        log.info('found {0} IP{1} interface for "{2}"'.format(
            facing, addr_family, ip_address))
        dns_rec_type = arec_map[addr_family]
        if facing == 'public':
            if create_dns_record:
                __add_dns_addr__(dns_rec_type, ip_address)
            if not vm_['ssh_host']:
                vm_['ssh_host'] = ip_address

    if vm_['ssh_host'] is None:
        raise SaltCloudSystemExit(
            'No suitable IP addresses found for ssh minion bootstrapping: {0}'.
            format(repr(data['networks'])))

    log.debug(
        'Found public IP address to use for ssh minion bootstrapping: {0}'.
        format(vm_['ssh_host']))

    vm_['key_filename'] = key_filename
    ret = salt.utils.cloud.bootstrap(vm_, __opts__)
    ret.update(data)

    log.info('Created Cloud VM \'{0[name]}\''.format(vm_))
    log.debug('\'{0[name]}\' VM creation details:\n{1}'.format(
        vm_, pprint.pformat(data)))

    salt.utils.cloud.fire_event('event',
                                'created instance',
                                'salt/cloud/{0}/created'.format(vm_['name']),
                                {
                                    'name': vm_['name'],
                                    'profile': vm_['profile'],
                                    'provider': vm_['driver'],
                                },
                                transport=__opts__['transport'])

    return ret
def query_instance(vm_=None, call=None): ''' Query an instance upon creation from the Joyent API ''' if isinstance(vm_, six.string_types) and call == 'action': vm_ = {'name': vm_, 'provider': 'joyent'} if call == 'function': # Technically this function may be called other ways too, but it # definitely cannot be called with --function. raise SaltCloudSystemExit( 'The query_instance action must be called with -a or --action.' ) __utils__['cloud.fire_event']( 'event', 'querying instance', 'salt/cloud/{0}/querying'.format(vm_['name']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) def _query_ip_address(): data = show_instance(vm_['name'], call='action') if not data: log.error( 'There was an error while querying Joyent. Empty response' ) # Trigger a failure in the wait for IP function return False if isinstance(data, dict) and 'error' in data: log.warning( 'There was an error in the query {0}'.format(data.get('error')) ) # Trigger a failure in the wait for IP function return False log.debug('Returned query data: {0}'.format(data)) if 'primaryIp' in data[1]: # Wait for SSH to be fully configured on the remote side if data[1]['state'] == 'running': return data[1]['primaryIp'] return None try: data = salt.utils.cloud.wait_for_ip( _query_ip_address, timeout=config.get_cloud_config_value( 'wait_for_ip_timeout', vm_, __opts__, default=10 * 60), interval=config.get_cloud_config_value( 'wait_for_ip_interval', vm_, __opts__, default=10), interval_multiplier=config.get_cloud_config_value( 'wait_for_ip_interval_multiplier', vm_, __opts__, default=1), ) except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc: try: # It might be already up, let's destroy it! pass #destroy(vm_['name']) except SaltCloudSystemExit: pass finally: raise SaltCloudSystemExit(str(exc)) return data
def query(method='droplets', droplet_id=None, command=None, args=None, http_method='get'): ''' Make a web call to DigitalOcean ''' base_path = str( config.get_cloud_config_value( 'api_root', get_configured_provider(), __opts__, search_global=False, default='https://api.digitalocean.com/v2')) path = '{0}/{1}/'.format(base_path, method) if droplet_id: path += '{0}/'.format(droplet_id) if command: path += command if not isinstance(args, dict): args = {} personal_access_token = config.get_cloud_config_value( 'personal_access_token', get_configured_provider(), __opts__, search_global=False) data = json.dumps(args) requester = getattr(requests, http_method) request = requester(path, data=data, headers={ 'Authorization': 'Bearer ' + personal_access_token, 'Content-Type': 'application/json' }) if request.status_code > 299: raise SaltCloudSystemExit( 'An error occurred while querying DigitalOcean. HTTP Code: {0} ' 'Error: \'{1}\''.format( request.status_code, # request.read() request.text)) log.debug(request.url) # success without data if request.status_code == 204: return True content = request.text result = json.loads(content) if result.get('status', '').lower() == 'error': raise SaltCloudSystemExit( pprint.pformat(result.get('error_message', {}))) return result
def create_lb(kwargs=None, call=None):
    r"""
    Create a load-balancer configuration.

    Validates the required kwargs (name, port, networkdomain), resolves the
    private IPs of any listed member nodes, selects the network domain and
    creates the balancer through the load-balancer connection, firing the
    salt-cloud events around the creation call.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_lb dimensiondata \
            name=dev-lb port=80 protocol=http \
            members=w1,w2,w3 algorithm=ROUND_ROBIN
    """
    conn = get_conn()
    if call != "function":
        raise SaltCloudSystemExit(
            "The create_lb function must be called with -f or --function.")

    if not kwargs or "name" not in kwargs:
        log.error("A name must be specified when creating a health check.")
        return False
    if "port" not in kwargs:
        log.error(
            "A port or port-range must be specified for the load-balancer.")
        return False
    if "networkdomain" not in kwargs:
        log.error("A network domain must be specified for the load-balancer.")
        return False

    if "members" in kwargs:
        members = []
        membersList = kwargs.get("members").split(",")
        log.debug("MemberList: %s", membersList)
        for member in membersList:
            try:
                log.debug("Member: %s", member)
                node = get_node(conn, member)  # pylint: disable=not-callable
                log.debug("Node: %s", node)
                ip = node.private_ips[0]
            except Exception as err:  # pylint: disable=broad-except
                log.error(
                    "Failed to get node ip: %s",
                    err,
                    # Show the traceback if the debug logging level is enabled
                    exc_info_on_loglevel=logging.DEBUG,
                )
                # Fix: previously a Member was appended even when the lookup
                # failed, reusing an empty or stale ip from an earlier
                # iteration; skip members whose ip could not be resolved.
                continue
            members.append(Member(ip, ip, kwargs["port"]))
    else:
        members = None
    log.debug("Members: %s", members)

    networkdomain = kwargs["networkdomain"]
    name = kwargs["name"]
    port = kwargs["port"]
    protocol = kwargs.get("protocol", None)
    algorithm = kwargs.get("algorithm", None)

    lb_conn = get_lb_conn(conn)
    network_domains = conn.ex_list_network_domains()
    # Fix: guard the lookup instead of an unguarded [...][0] that raised a
    # bare IndexError when the named network domain does not exist.
    network_domain = next(
        (y for y in network_domains if y.name == networkdomain), None)
    if network_domain is None:
        log.error("Network domain not found: %s", networkdomain)
        return False

    log.debug("Network Domain: %s", network_domain.id)
    lb_conn.ex_set_current_network_domain(network_domain.id)

    event_data = _to_event_data(kwargs)

    __utils__["cloud.fire_event"](
        "event",
        "create load_balancer",
        "salt/cloud/loadbalancer/creating",
        args=event_data,
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )

    lb = lb_conn.create_balancer(name, port, protocol, algorithm, members)

    event_data = _to_event_data(kwargs)

    __utils__["cloud.fire_event"](
        "event",
        "created load_balancer",
        "salt/cloud/loadbalancer/created",
        args=event_data,
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    return _expand_balancer(lb)
def create(vm_):
    """
    Create a single VM from a data dict

    Assembles the 1and1 server (and hdds, for non-fixed instance sizes),
    requests creation, waits for the node to reach ``powered_on`` with an
    IP address, then bootstraps the minion over SSH.

    :param vm_: profile/VM data dict (must contain 'name'; 'profile' and
        'driver' are read from it).
    :return: bootstrap result dict merged with node data, or ``False``
        when the profile is misconfigured or the API request fails.
    :raises SaltCloudSystemExit: when no valid IP address is found or the
        wait-for-ip loop times out/fails.
    """
    try:
        # Check for required profile parameters before sending any API calls.
        if (vm_["profile"] and config.is_profile_configured(
                __opts__, (_get_active_provider_name() or "oneandone"),
                vm_["profile"]) is False):
            return False
    except AttributeError:
        pass

    data = None
    conn = get_conn()
    hdds = []

    # Assemble the composite server object.
    server = _get_server(vm_)

    if not bool(server.specs["hardware"]["fixed_instance_size_id"]):
        # Assemble the hdds object.
        hdds = _get_hdds(vm_)

    __utils__["cloud.fire_event"](
        "event",
        "requesting instance",
        "salt/cloud/{}/requesting".format(vm_["name"]),
        args={
            "name": vm_["name"]
        },
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )

    try:
        data = conn.create_server(server=server, hdds=hdds)

        _wait_for_completion(conn, get_wait_timeout(vm_), data["id"])
    except Exception as exc:  # pylint: disable=W0703
        log.error(
            "Error creating %s on 1and1\n\n"
            "The following exception was thrown by the 1and1 library "
            "when trying to run the initial deployment: \n%s",
            vm_["name"],
            exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG,
        )
        return False

    vm_["server_id"] = data["id"]
    # The API only returns the root password at creation time; keep it for
    # the bootstrap step below.
    password = data["first_password"]

    def __query_node_data(vm_, data):
        """
        Query node data until node becomes available.
        """
        running = False
        try:
            data = show_instance(vm_["name"], "action")
            if not data:
                return False
            log.debug(
                "Loaded node data for %s:\nname: %s\nstate: %s",
                vm_["name"],
                pprint.pformat(data["name"]),
                data["status"]["state"],
            )
        except Exception as err:  # pylint: disable=broad-except
            log.error(
                "Failed to get nodes list: %s",
                err,
                # Show the trackback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG,
            )
            # Trigger a failure in the wait for IP function
            return False

        running = data["status"]["state"].lower() == "powered_on"
        if not running:
            # Still not running, trigger another iteration
            return

        vm_["ssh_host"] = data["ips"][0]["ip"]

        return data

    try:
        data = salt.utils.cloud.wait_for_ip(
            __query_node_data,
            update_args=(vm_, data),
            timeout=config.get_cloud_config_value("wait_for_ip_timeout",
                                                  vm_,
                                                  __opts__,
                                                  default=10 * 60),
            interval=config.get_cloud_config_value("wait_for_ip_interval",
                                                   vm_,
                                                   __opts__,
                                                   default=10),
        )
    except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
        try:
            # It might be already up, let's destroy it!
            destroy(vm_["name"])
        except SaltCloudSystemExit:
            pass
        finally:
            # Fix: exc.message does not exist on Python 3 exceptions (PEP 352
            # removed it), so str(exc.message) raised AttributeError here and
            # masked the real timeout/failure; use str(exc) instead.
            raise SaltCloudSystemExit(str(exc))

    log.debug("VM is now running")
    log.info("Created Cloud VM %s", vm_)
    log.debug("%s VM creation details:\n%s", vm_, pprint.pformat(data))

    __utils__["cloud.fire_event"](
        "event",
        "created instance",
        "salt/cloud/{}/created".format(vm_["name"]),
        args={
            "name": vm_["name"],
            "profile": vm_["profile"],
            "provider": vm_["driver"],
        },
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )

    if "ssh_host" in vm_:
        vm_["password"] = password
        vm_["key_filename"] = get_key_filename(vm_)
        ret = __utils__["cloud.bootstrap"](vm_, __opts__)
        ret.update(data)
        return ret
    else:
        raise SaltCloudSystemExit("A valid IP address was not found.")