def upgrade_vt(target, ret=None, output=True):
    '''Upgrade LXC on a host and reboot its containers.

    After a successful upgrade, every currently running container is
    recorded in the host's local registry under ``lxc_to_restart`` and
    then stop/start cycled; containers are meant to be unmarked once
    explicitly handled.

    target
        compute node (minion id) to upgrade
    ret
        optional pre-existing result mapping to accumulate into
    output
        NOTE(review): unused in the visible portion of this function
    '''
    func_name = 'mc_cloud_lxc.upgrade_vt {0}'.format(target)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if not ret:
        ret = result()
    ret['comment'] += yellow('Upgrading lxc on {0}\n'.format(target))
    # snapshot the lxc version before the install step so we can tell
    # afterwards whether anything actually changed
    version = cli('cmd.run', 'lxc-info --version', salt_target=target)
    # run the install SLS which should take care of upgrading
    for step in [configure_install_lxc]:
        try:
            step(target, ret=ret, output=False)
        except FailedStepError:
            ret['result'] = False
            ret['comment'] += red('Failed to upgrade lxc\n')
            return ret
    # after upgrading: compare versions to decide whether containers
    # must be scheduled for a restart
    nversion = cli('cmd.run', 'lxc-info --version', salt_target=target)
    if nversion != version:
        # version changed: mark every running container for reboot in
        # the target's local registry
        containers = cli('lxc.list', salt_target=target)
        reg = cli('mc_macros.update_local_registry',
                  'lxc_to_restart',
                  {'todo': containers.get('running', [])},
                  salt_target=target)
        ret['comment'] += red('Upgraded lxc\n')
    else:
        ret['comment'] += red('lxc was already at the last version\n')
    # re-read the registry so 'todo' also covers reboots left over from
    # a previous (possibly interrupted) run
    # NOTE(review): indentation reconstructed from flattened source --
    # this read is assumed to happen in both branches; confirm upstream
    reg = cli('mc_macros.get_local_registry',
              'lxc_to_restart',
              salt_target=target)
    todo = reg.get('todo', [])
    done = []
    for lxc in todo:
        try:
            # stop/start cycle; any failure aborts this container's
            # reboot and flags the overall result as failed
            stopret = cli('lxc.stop', lxc, salt_target=target)
            if not stopret['result']:
                raise ValueError('wont stop')
            startret = cli('lxc.start', lxc, salt_target=target)
            if not startret['result']:
                raise ValueError('wont start')
            ret['comment'] += yellow('Rebooted {0}\n'.format(lxc))
            done.append(lxc)
        except Exception, ex:
            ret['result'] = False
            ret['comment'] += yellow(
                'lxc {0} failed to'
                ' reboot: {1}\n'.format(lxc, ex.message))
    # NOTE(review): this chunk appears truncated -- 'done' is collected
    # but never written back to the registry here, and there is no
    # final salt_output/return; the remainder presumably lives outside
    # this view.
def step(vm, step, compute_node=None, vt=None, ret=None, output=True):
    '''Execute a provision step on a VM node.

    Resolves up to three hook functions for the step and runs them in
    order: a vt-specific pre hook, a generic vm hook and a vt-specific
    post hook.  Missing hooks are skipped, and once ``ret['result']``
    goes falsy the remaining hooks are not run.
    '''
    func_name = 'mc_cloud_vm.provision.step {0} {1}'.format(vm, step)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    compute_node = __salt__['mc_cloud_vm.get_compute_node'](vm, compute_node)
    vt = __salt__['mc_cloud_vm.get_vt'](vm, vt)
    if ret is None:
        ret = result()
    # candidate hook names, tried in this order
    pre_vid_ = 'mc_cloud_{0}.vm_{1}'.format(vt, step)
    # NOTE(review): '{1}' skips the vt argument so this resolves to the
    # vt-independent module -- presumably intentional; confirm
    id_ = 'mc_cloud_vm.vm_{1}'.format(vt, step)
    post_vid_ = 'mc_cloud_{0}.post_vm_{1}'.format(vt, step)
    for cid_ in [pre_vid_, id_, post_vid_]:
        # skip hooks that are not registered, and stop doing any work
        # once a previous hook has failed
        if (not ret['result']) or (cid_ not in __salt__):
            continue
        try:
            ret = __salt__[cid_](vm,
                                 compute_node=compute_node,
                                 vt=vt,
                                 ret=ret,
                                 output=False)
            check_point(ret, __opts__, output=output)
        except FailedStepError:
            ret['result'] = False
        except Exception, exc:
            # unmanaged error: keep the full traceback for the caller
            trace = traceback.format_exc()
            ret['trace'] += 'lxcprovision: {0} in {1}\n'.format(
                exc, cid_)
            ret['trace'] += trace
            ret['result'] = False
            ret['comment'] += red('unmanaged exception for '
                                  '{0}/{1}/{2}'.format(compute_node,
                                                       vt, vm))
    if ret['result']:
        # on success, drop noisy diagnostics before handing back
        ret['trace'] = ''
        ret['output'] = ''
    # NOTE(review): no salt_output/time_log('end')/return here -- the
    # tail of this function appears truncated from this chunk.
def vm_ping(vm, compute_node=None, vt=None, ret=None, output=True):
    '''Check that a vm answers a salt ``test.ping``.

    compute_node
        where to act
    vm
        vm to ping

    ::

        mastersalt-run -lall mc_cloud_vm.vm_ping foo.domain.tld

    '''
    # make sure the vm configuration registry is in place first
    __salt__['mc_cloud_vm.lazy_register_configuration'](
        vm, compute_node)
    func_name = 'mc_cloud_vm.provision.ping {0}'.format(vm)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    vt = __salt__['mc_cloud_vm.get_vt'](vm, vt)
    compute_node = __salt__['mc_cloud_vm.get_compute_node'](vm, compute_node)
    if ret is None:
        ret = result()
    # an unreachable minion raises; treat any failure as "no ping"
    try:
        reachable = cli('test.ping', salt_target=vm)
    except Exception:
        ret['trace'] += "{0}\n".format(traceback.format_exc())
        reachable = False
    ret['result'] = reachable
    status = (green('VM {0} is pinguable\n')
              if ret['result']
              else red('VM {0} is unreachable\n'))
    ret['comment'] += status.format(vm)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def register_configuration(target, ret=None, output=True):
    '''
    Store the compute node cloud configuration as a local registry
    on the target node.
    '''
    func_name = 'mc_compute_node.register_configuration {0}'.format(target)
    if ret is None:
        ret = result()
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    settings = __salt__['mc_cloud_compute_node.cn_sls_pillar'](target)
    cret = cli('mc_macros.update_local_registry',
               'cloud_compute_node_settings',
               settings,
               registry_format='pack',
               salt_target=target)
    # success is detected by the stored key showing up in the
    # mapping returned by the registry helper
    reg_key = ('makina-states.local.'
               'cloud_compute_node_settings.cnSettings')
    stored = isinstance(cret, dict) and reg_key in cret
    ret['result'] = stored
    if stored:
        ret['comment'] += yellow('Configuration stored'
                                 ' on {0}\n'.format(target))
    else:
        ret['comment'] += red('Configuration failed to store'
                              ' on {0}\n'.format(target))
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def provision(vm, compute_node=None, vt=None,
              steps=None, ret=None, output=True):
    '''provision a vm

    compute_node
        where to act
    vt
        virtual type
    vm
        vm to spawn
    steps
        list or comma separated list of steps
        Default::

              ['spawn', 'hostsfile', 'sshkeys', 'grains',
               'initial_setup', 'initial_highstate']

    ::

        mastersalt-run -lall mc_cloud_vm.provision foo.domain.tld

    '''
    func_name = 'mc_cloud_vm.provision {0}'.format(vm)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    # resolve vt and compute node once
    # (fix: the original called get_vt a second time after
    #  get_compute_node; that second call was redundant)
    vt = __salt__['mc_cloud_vm.get_vt'](vm, vt)
    compute_node = __salt__['mc_cloud_vm.get_compute_node'](vm, compute_node)
    # accept either a comma separated string or an iterable of steps
    if isinstance(steps, basestring):
        steps = steps.split(',')
    if steps is None:
        steps = ['register_configuration_on_cn',
                 'spawn',
                 'register_configuration',
                 'preprovision',
                 # 'sshkeys',
                 # 'hostsfile',
                 # 'grains',
                 # 'markers',
                 'initial_setup',
                 'initial_highstate']
    if ret is None:
        ret = result()
    # run each provision step in order, accumulating results
    for step in steps:
        cret = __salt__['mc_cloud_vm.step'](vm, step,
                                            compute_node=compute_node,
                                            vt=vt, output=False)
        merge_results(ret, cret)
    if ret['result']:
        ret['comment'] += green(
            '{0}/{1}/{2} deployed\n').format(compute_node, vt, vm)
    else:
        ret['comment'] += red(
            '{0}/{1}/{2} failed to deploy\n').format(compute_node, vt, vm)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def register_configuration(vm, compute_node=None, vt=None, ret=None,
                           output=True, salt_target=None):
    '''
    Store the vm cloud configuration as a local registry on
    ``salt_target``.

    ``salt_target`` defaults to the vm itself but can be any other
    reachable minion.  The idea is to copy this configuration onto the
    compute node first so the vm can be provisioned with the right
    settings.
    '''
    compute_node = __salt__['mc_cloud_vm.get_compute_node'](vm, compute_node)
    func_name = 'mc_cloud_vm.register_configuration {0}'.format(vm)
    if not salt_target:
        salt_target = vm
    # when storing on a third-party minion, suffix the registry name
    # with the vm name so entries for several vms do not collide
    suf = '_{0}'.format(vm) if salt_target != vm else ''
    if ret is None:
        ret = result()
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    settings = __salt__['mc_cloud_vm.vm_sls_pillar'](compute_node, vm)
    cret = cli('mc_macros.update_local_registry',
               'cloud_vm_settings{0}'.format(suf),
               settings,
               registry_format='pack',
               salt_target=salt_target)
    # success is detected by the stored key showing up in the
    # mapping returned by the registry helper
    wanted_key = ('makina-states.local.'
                  'cloud_vm_settings{0}.vmSettings').format(suf)
    stored = isinstance(cret, dict) and wanted_key in cret
    ret['result'] = stored
    if stored:
        ret['comment'] += yellow('VM Configuration stored'
                                 ' on {0}\n'.format(salt_target))
    else:
        ret['comment'] += red('VM Configuration failed to store'
                              ' on {0}\n'.format(salt_target))
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def post_provision(vm, compute_node=None, vt=None, ret=None, output=True):
    '''post provision a vm

    Runs the 'ping' and 'post_provision_hook' steps and aggregates
    their results.

    compute_node
        where to act
    vt
        virtual type
    vm
        vm to act on

    ::

        mastersalt-run -lall mc_cloud_vm.post_provision foo.domain.tld

    '''
    func_name = 'mc_cloud_vm.post_provision {0}'.format(vm)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if ret is None:
        ret = result()
    vt = __salt__['mc_cloud_vm.get_vt'](vm, vt)
    compute_node = __salt__['mc_cloud_vm.get_compute_node'](vm, compute_node)
    for post_step in ('ping', 'post_provision_hook'):
        step_ret = __salt__['mc_cloud_vm.step'](vm, post_step,
                                                compute_node=compute_node,
                                                vt=vt, output=False)
        merge_results(ret, step_ret)
    if ret['result']:
        colorize, template = green, '{0}/{1}/{2} deployed\n'
    else:
        colorize, template = red, '{0}/{1}/{2} failed to deploy\n'
    ret['comment'] += colorize(template).format(compute_node, vt, vm)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def saltify(name, output=True, ret=None):
    '''Saltify a specific target.

    If the target is not yet registered, deploys salt on it via a
    ``cloud.profile`` call using the saltify driver, then (once only,
    tracked via ``mc_cloud_compute_node`` conf) runs a full initial
    highstate over ssh.
    '''
    if not ret:
        ret = result()
    try:
        already_exists = __salt__['mc_cloud_controller.exists'](name)
        data = None
        if already_exists:
            success = green('{0} is already saltified'.format(name))
        else:
            try:
                data = cli('mc_cloud_saltify.settings_for_target', name)
                if not isinstance(data, dict):
                    raise SaltyficationError(red('{0}'.format(data)))
            except KeyError:
                data = None
            if data is None:
                raise SaltyficationError(
                    red('Saltify target {0} is not configured'.format(name)))
            else:
                # NOTE(review): missing .format(name) here -- the literal
                # '{0}' will end up in the comment (compare the
                # already_exists branch above); cannot fix in a doc pass
                success = green('{0} is saltified')
                kwargs = {'minion': {'master': data['master'],
                                     'master_port': data['master_port']}}
                # forward only the optional ssh/provision settings that
                # are actually set for this target
                for var in [
                        "ssh_username", "ssh_keyfile", "keep_tmp",
                        "gateway", "sudo", "password", "script_args",
                        "ssh_host", "sudo_password",
                ]:
                    if data.get(var):
                        kwargs[var] = data[var]
                try:
                    info = __salt__['cloud.profile'](
                        data['profile'], [name], vm_overrides=kwargs)
                except Exception, exc:
                    trace = traceback.format_exc()
                    ret['trace'] = trace
                    raise FailedStepError(red('{0}'.format(exc)))
                ret = process_cloud_return(
                    name, info, driver='saltify', ret=ret)
        if ret['result']:
            ret['comment'] = success
        if not output:
            ret['changes'] = {}
        check_point(ret, __opts__)
        # once saltified, also make sure this host has had a chance to
        # complete its setup through a full initial highstate
        if not cli('mc_cloud_compute_node.get_conf_for_target',
                   name, 'saltified'):
            if data is None:
                data = cli('mc_cloud_saltify.settings_for_target', name)
            csettings = cli('mc_cloud.settings')
            proxycmd = ''
            if data.get('ssh_gateway', None):
                # NOTE(review): missing leading space -- this produces
                # '...UserKnownHostsFile=/dev/null-oControlPath=none'
                args = '-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null'
                args += '-oControlPath=none'
                if 'ssh_key' in data:
                    args += ' -i {0}'.format(data['ssh_key'])
                if 'ssh_port' in data:
                    args += ' -p {0}'.format(data['ssh_port'])
                # NOTE(review): '{0}' (the ssh_gateway value) is never
                # interpolated below -- '{1}' (name) is used twice;
                # looks like the first '{1}' should be '{0}'; confirm
                proxycmd = '-o\"ProxyCommand=ssh {1} {2} nc -w300 {1} 22\"'.format(
                    data['ssh_gateway'], name, args
                )
            cmd = (
                'ssh {2} {0} {1}/makina-states/_scripts/boot-salt.sh '
                '--initial-highstate'
            ).format(name,
                     csettings['root'], proxycmd)
            cmdret = cli('cmd.run_all', cmd)
            if cmdret['retcode']:
                ret['result'] = False
                ret['trace'] += 'Using cmd: \'{0}\''.format(cmd)
                ret['trace'] += '{0}\n'.format(cmdret['stdout'])
                ret['trace'] += '{0}\n'.format(cmdret['stderr'])
                ret['comment'] += red(
                    'SALTIFY: Error in highstate for {0}'.format(name))
                check_point(ret, __opts__)
            # ok, mark the initial highstate as done
            cli('mc_cloud_compute_node.set_conf_for_target',
                name, 'saltified', True)
    # NOTE(review): the except clause(s) matching the outer 'try' above
    # (and the final return) are truncated from this chunk.
def vm_spawn(vm, compute_node=None, vt='lxc',
             ret=None, output=True, force=False):
    '''spawn the vm

    ::

        mastersalt-run -lall mc_cloud_lxc.vm_spawn foo.domain.tld

    '''
    func_name = 'mc_cloud_lxc.vm_spawn {0}'.format(vm)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if not ret:
        ret = result()
    compute_node = __salt__['mc_cloud_vm.get_compute_node'](vm, compute_node)
    # local registry of containers already provisioned per compute node
    reg = cli('mc_macros.get_local_registry', 'mc_cloud_lxc_containers')
    provisioned_containers = reg.setdefault('provisioned_containers',
                                            OrderedDict())
    containers = provisioned_containers.setdefault(compute_node, [])
    # push the vm configuration onto the compute node before spawning
    reg = __salt__['mc_cloud_vm.lazy_register_configuration_on_cn'](
        vm, compute_node)
    pillar = __salt__['mc_cloud_vm.vm_sls_pillar'](compute_node, vm)
    target = compute_node
    data = pillar['vtVmData']
    cloudSettings = pillar['cloudSettings']
    # NOTE(review): 'profile' is only referenced by the commented-out
    # salt-cloud code path below; also note the 'sratch' typo in the
    # default value (kept as-is: it is a runtime string)
    profile = data.get(
        'profile', 'ms-{0}-dir-sratch'.format(target))
    profile_data = {
        'target': target,
        'dnsservers': data.get("dnsservers", ["8.8.8.8", "4.4.4.4"]),
        'minion': {
            'master': data['master'],
            'master_port': data['master_port'],
        }
    }
    # forward only the lxc/ssh settings that are actually set
    for var in ["from_container", "snapshot", "image",
                "additional_ips", "gateway", "bridge", "mac",
                "lxc_conf_unset", "ssh_gateway", "ssh_gateway_user",
                "ssh_gateway_port", "ssh_gateway_key", "ip", "netmask",
                "size", "backing", "vgname", "script", "lvname",
                "script_args", "dnsserver", "ssh_username", "password",
                "lxc_conf"]:
        val = data.get(var)
        if val:
            if var in ['script_args']:
                # NOTE(review): the literal '{0}' placeholder is kept in
                # the appended option -- presumably substituted later by
                # the consumer of script_args; confirm
                if '--salt-cloud-dir' not in val:
                    val = '{0} {1}'.format(
                        val, '--salt-cloud-dir {0}')
            profile_data[var] = val
    # marker file whose presence on the master means the vm minion key
    # is already accepted
    marker = "{cloudSettings[prefix]}/pki/master/minions/{vm}".format(
        cloudSettings=cloudSettings, vm=vm)
    lret = cli('cmd.run_all', 'test -e {0}'.format(marker))
    # NOTE(review): this override makes the marker test above dead code
    # (retcode forced to "not found"), so below only the ping decides
    # whether to skip spawning -- looks deliberate given the comment
    # that follows, but confirm
    lret['retcode'] = 1
    # Verify whether the VM is already reachable when it is marked as
    # provisioned.  This adds a ~10 second overhead upon VM creation,
    # but prevents clobbering a vm that was lost from the local
    # registry and where re-provisioning could be harmful.  If it
    # pings, we are already managing it and will not enforce spawning.
    try:
        ping = False
        if vm in containers:
            ping = cli('test.ping', salt_timeout=10, salt_target=vm)
    except Exception:
        ping = False
    if force or (lret['retcode'] and not ping):
        try:
            # XXX: Code to use with salt-cloud
            # cret = __salt__['cloud.profile'](
            #     profile, [vm], vm_overrides=profile_data)
            # if vm not in cret:
            #     cret['result'] = False
            # cret = cret[vm]['runner_return']
            # XXX: using the lxc runner which is now faster and nicer.
            cret = __salt__['lxc.cloud_init'](
                [vm], host=compute_node, **profile_data)
            if not cret['result']:
                # convert to regular dict for pformat
                errors = dict(cret.pop('errors', {}))
                hosts = {}
                for h in errors:
                    hosts[h] = dict(errors[h])
                cret['errors'] = hosts
                ret['trace'] += 'FAILURE ON LXC {0}:\n{1}\n'.format(
                    vm, pformat(dict(cret)))
                merge_results(ret, cret)
                ret['result'] = False
            else:
                ret['comment'] += '{0} provisioned\n'.format(vm)
        except Exception, ex:
            ret['trace'] += '{0}\n'.format(traceback.format_exc())
            ret['result'] = False
            ret['comment'] += red(ex.message)
    # NOTE(review): the rest of this function (registry update,
    # salt_output, return) appears truncated from this chunk.
configured.append(vm) # if everything is well, wipe the unseful output cret['output'] = '' cret['trace'] = '' else: ret['result'] = False for k in ['trace', 'comment']: if k in cret: val = ret.setdefault(k, '') val += cret[k] if vm not in configuration_error: configuration_error.append(vm) cret.pop('result', False) merge_results(ret, cret) if len(configuration_error): ret['comment'] += red('There were errors while configuring ' 'vms nodes {0}\n'.format(configuration_error)) else: if ret['result']: ret['trace'] = '' ret['comment'] += green('All vms were configured\n') salt_output(ret, __opts__, output=output) __salt__['mc_api.time_log']('end {0}'.format(func_name)) return ret def provision_vms(compute_node, skip=None, only=None, ret=None, output=True, refresh=False): '''Provision all or selected vms on a compute node ::