def register_configurations(compute_node, skip=None, only=None, ret=None,
                            output=True, refresh=False):
    '''Register all configurations in localregistries for reachable vms

    For each VM hosted on ``compute_node`` (after only/skip filtering),
    push the VM cloud configuration first onto the compute node, then onto
    the VM itself (the second step may fail if the VM is not yet spawned).

    ::

        mastersalt-run -lall mc_cloud_vm.register_configurations host1.domain.tld
        mastersalt-run -lall mc_cloud_vm.register_configurations host1.domain.tld only=['foo.domain.tld']
        mastersalt-run -lall mc_cloud_vm.register_configurations host1.domain.tld skip=['foo2.domain.tld']
    '''
    # NOTE(review): timing label says 'configuration_vms', not
    # 'register_configurations' -- looks like a copy/paste slip; confirm
    # against the time_log consumers before changing it.
    func_name = 'mc_cloud_vm.configuration_vms'
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if ret is None:
        ret = result()
    # normalize the only/skip VM lists through the controller helper
    _, only, __, skip = (
        __salt__['mc_cloud_controller.gather_only_skip'](
            only_vms=only, skip_vms=skip))
    if refresh:
        cli('saltutil.refresh_pillar')
    settings = cli('mc_cloud_compute_node.settings')
    # per-compute-node accounting buckets stored in ret['changes']
    gprov = ret['changes'].setdefault('vms_configured', {})
    gerror = ret['changes'].setdefault('vms_in_error', {})
    configured = gprov.setdefault(compute_node, [])
    configuration_error = gerror.setdefault(compute_node, [])
    vms = settings['targets'].get(compute_node,
                                  {'virt_types': [], 'vms': {}})
    # vms maps vm name -> virtualization type after filtering
    vms = filter_vms(compute_node, vms['vms'], skip, only)
    kvms = [a for a in vms]
    kvms.sort()
    for idx, vm in enumerate(kvms):
        vt = vms[vm]
        cret = result()
        try:
            # first: register the conf on compute node
            if not cli('test.ping', salt_target=compute_node):
                raise FailedStepError('not reachable')
            cret = register_configuration_on_cn(vm,
                                                compute_node=compute_node,
                                                vt=vt,
                                                ret=cret,
                                                output=False)
            check_point(cret, __opts__, output=output)
            # second: register the conf on VM
            # this may fail if the vm is not yet spawned
            if not cli('test.ping', salt_target=vm):
                raise FailedStepError('not reachable')
            cret = register_configuration(vm,
                                          compute_node=compute_node,
                                          vt=vt,
                                          ret=cret,
                                          output=False)
            check_point(cret, __opts__, output=output)
        except FailedStepError, exc:
            # expected failure (unreachable host): keep going, mark error
            trace = traceback.format_exc()
            cret['trace'] += '{0}\n'.format(exc.message)
            cret['result'] = False
        except Exception, exc:
            # unexpected failure: replace cret wholesale with an error record
            trace = traceback.format_exc()
            cret = {'result': False,
                    'output': 'unknown error on {0}/{2}\n{1}'.format(
                        compute_node, exc, vm),
                    'comment': 'unknown error on {0}/{1}\n'.format(
                        compute_node, vm),
                    'trace': trace}
def provision_vms(compute_node, skip=None, only=None, ret=None,
                  output=True, refresh=False):
    '''Provision all or selected vms on a compute node

    ::

        mastersalt-run -lall mc_cloud_vm.provision_vms host1.domain.tld
        mastersalt-run -lall mc_cloud_vm.provision_vms host1.domain.tld only=['foo.domain.tld']
        mastersalt-run -lall mc_cloud_vm.provision_vms host1.domain.tld skip=['foo2.domain.tld']
    '''
    func_name = 'mc_cloud_vm.provision_vms'
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if ret is None:
        ret = result()
    # normalize the only/skip VM lists through the controller helper
    _, only, __, skip = (
        __salt__['mc_cloud_controller.gather_only_skip'](
            only_vms=only, skip_vms=skip))
    if refresh:
        cli('saltutil.refresh_pillar')
    settings = cli('mc_cloud_compute_node.settings')
    # per-compute-node accounting buckets stored in ret['changes']
    gprov = ret['changes'].setdefault('vms_provisionned', {})
    gerror = ret['changes'].setdefault('vms_in_error', {})
    provisionned = gprov.setdefault(compute_node, [])
    provision_error = gerror.setdefault(compute_node, [])
    vms = settings['targets'].get(compute_node,
                                  {'virt_types': [], 'vms': {}})
    # vms maps vm name -> virtualization type after filtering
    vms = filter_vms(compute_node, vms['vms'], skip, only)
    kvms = [a for a in vms]
    kvms.sort()
    for idx, vm in enumerate(kvms):
        vt = vms[vm]
        cret = result()
        try:
            #if idx == 1:
            #    raise FailedStepError('foo')
            #elif idx > 0:
            #    raise Exception('bar')
            cret = provision(vm, compute_node=compute_node, vt=vt,
                             ret=cret, output=False)
        except FailedStepError, exc:
            # expected failure: keep iterating, mark this VM in error
            trace = traceback.format_exc()
            cret['trace'] += '{0}\n'.format(exc.message)
            cret['result'] = False
        except Exception, exc:
            # unexpected failure: replace cret wholesale with an error record
            trace = traceback.format_exc()
            cret = {'result': False,
                    'output': 'unknown error on {0}/{2}\n{1}'.format(
                        compute_node, exc, vm),
                    'comment': 'unknown error on {0}/{1}\n'.format(
                        compute_node, vm),
                    'trace': trace}
def register_configurations(only=None, only_vms=None, skip=None,
                            skip_vms=None, refresh=False, ret=None,
                            output=True):
    '''Parse all reachable compute nodes and vms
    and regenerate the local configuration registries concerning
    cloud deployment'''
    # NOTE(review): only_vms / skip_vms are accepted but never used in the
    # visible body -- confirm whether they should be forwarded to
    # gather_only_skip.
    func_name = 'mc_compute_node.register_configurations'
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    only, _, skip, __ = (
        __salt__['mc_cloud_controller.gather_only_skip'](
            only=only, skip=skip))
    if ret is None:
        ret = result()
    if refresh:
        cli('saltutil.refresh_pillar')
    settings = cli('mc_cloud_compute_node.settings')
    # accounting lists stored in ret['changes']
    configuration = ret['changes'].setdefault('cns_configured', [])
    configuration_error = ret['changes'].setdefault('cns_in_error', [])
    targets = [a for a in settings['targets']]
    # targets += ['foo', 'bar']
    targets = filter_compute_nodes(targets, skip, only)
    hosts_to_configure_vms = []
    for idx, compute_node in enumerate(targets):
        cret = result()
        try:
            if not cli('test.ping', salt_target=compute_node):
                raise FailedStepError('not reachable')
            register_configuration(compute_node, ret=cret, output=False)
            check_point(cret, __opts__, output=output)
            if compute_node not in hosts_to_configure_vms:
                hosts_to_configure_vms.append(compute_node)
        except FailedStepError:
            # unreachable node: just mark failure, continue with others
            cret['result'] = False
        except Exception, exc:
            # unexpected failure: replace cret wholesale with an error record
            trace = traceback.format_exc()
            cret = {'result': False,
                    'output': 'unknown error on {0}\n{1}'.format(
                        compute_node, exc),
                    'comment': 'unknown error on {0}\n'.format(compute_node),
                    'trace': trace}
        if cret['result']:
            if compute_node not in configuration:
                configuration.append(compute_node)
            # if everything is well, wipe the unseful output
            cret['output'] = ''
            cret['trace'] = ''
        else:
            ret['result'] = False
            if compute_node not in configuration_error:
                configuration_error.append(compute_node)
        # drop per-node result flag so merge_results does not clobber ours
        cret.pop('result', False)
        merge_results(ret, cret)
def post_provision_compute_nodes(skip=None, only=None, output=True,
                                 refresh=False, ret=None):
    '''post provision all compute nodes

    Run ``post_deploy`` on every (filtered) compute node and record the
    outcome in ret['changes'] accounting lists.
    '''
    func_name = 'mc_compute_node.post_provision_compute_nodes'
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    only, _, skip, __ = (
        __salt__['mc_cloud_controller.gather_only_skip'](
            only=only, skip=skip))
    if ret is None:
        ret = result()
    if refresh:
        cli('saltutil.refresh_pillar')
    settings = cli('mc_cloud_compute_node.settings')
    # accounting lists stored in ret['changes']
    provision = ret['changes'].setdefault('postp_cns_provisionned', [])
    provision_error = ret['changes'].setdefault('postp_cns_in_error', [])
    targets = [a for a in settings['targets']]
    #targets += ['foo', 'bar']
    targets = filter_compute_nodes(targets, skip, only)
    for idx, compute_node in enumerate(targets):
        cret = result()
        try:
            post_deploy(compute_node, ret=cret, output=False)
            #if idx == 1:
            #    raise FailedStepError('foo')
            #elif idx > 0:
            #    raise Exception('bar')
        except FailedStepError:
            # expected failure: mark and continue with the other nodes
            cret['result'] = False
        except Exception, exc:
            # unexpected failure: replace cret wholesale with an error record
            trace = traceback.format_exc()
            cret = {'result': False,
                    'output': 'unknown error on {0}\n{1}'.format(
                        compute_node, exc),
                    'comment': 'unknown error on {0}\n'.format(compute_node),
                    'trace': trace}
        if cret['result']:
            if compute_node not in provision:
                provision.append(compute_node)
            # if everything is well, wipe the unseful output
            cret['output'] = ''
            cret['trace'] = ''
        else:
            ret['result'] = False
            if compute_node not in provision_error:
                provision_error.append(compute_node)
        # drop per-node result flag so merge_results does not clobber ours
        cret.pop('result', False)
        merge_results(ret, cret)
def step(vm, step, compute_node=None, vt=None, ret=None, output=True):
    '''Execute a provisioning step on a VM node.

    For the given step name, try in order the pre / main / post hooks:
    ``mc_cloud_<vt>.vm_<step>``, ``mc_cloud_vm.vm_<step>`` and
    ``mc_cloud_<vt>.post_vm_<step>``, skipping hooks that do not exist
    and stopping the chain once a hook has failed.
    '''
    func_name = 'mc_cloud_vm.provision.step {0} {1}'.format(vm, step)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    compute_node = __salt__['mc_cloud_vm.get_compute_node'](vm, compute_node)
    vt = __salt__['mc_cloud_vm.get_vt'](vm, vt)
    if ret is None:
        ret = result()
    pre_vid_ = 'mc_cloud_{0}.vm_{1}'.format(vt, step)
    # only {1} is used on purpose: the generic hook lives in mc_cloud_vm
    id_ = 'mc_cloud_vm.vm_{1}'.format(vt, step)
    post_vid_ = 'mc_cloud_{0}.post_vm_{1}'.format(vt, step)
    for cid_ in [pre_vid_, id_, post_vid_]:
        # skip missing hooks; once a hook failed, skip the remaining ones
        if (not ret['result']) or (cid_ not in __salt__):
            continue
        try:
            ret = __salt__[cid_](vm, compute_node=compute_node,
                                 vt=vt, ret=ret, output=False)
            check_point(ret, __opts__, output=output)
        except FailedStepError:
            ret['result'] = False
        except Exception, exc:
            trace = traceback.format_exc()
            ret['trace'] += 'lxcprovision: {0} in {1}\n'.format(
                exc, cid_)
            ret['trace'] += trace
            ret['result'] = False
            ret['comment'] += red('unmanaged exception for '
                                  '{0}/{1}/{2}'.format(compute_node, vt,
                                                       vm))
    if ret['result']:
        # success: wipe noise so merged output stays readable
        ret['trace'] = ''
        ret['output'] = ''
def vm_ping(vm, compute_node=None, vt=None, ret=None, output=True):
    '''ping a specific vm on a specific compute node

    compute_node
        where to act
    vm
        vm to ping

    ::

        mastersalt-run -lall mc_cloud_vm.vm_ping foo.domain.tld
    '''
    __salt__['mc_cloud_vm.lazy_register_configuration'](
        vm, compute_node)
    func_name = 'mc_cloud_vm.provision.ping {0}'.format(vm)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    vt = __salt__['mc_cloud_vm.get_vt'](vm, vt)
    compute_node = __salt__['mc_cloud_vm.get_compute_node'](vm, compute_node)
    if ret is None:
        ret = result()
    # best effort ping; an exception counts as unreachable
    reachable = False
    try:
        reachable = cli('test.ping', salt_target=vm)
    except Exception:
        ret['trace'] += "{0}\n".format(traceback.format_exc())
    ret['result'] = reachable
    if reachable:
        msg = green('VM {0} is pinguable\n')
    else:
        msg = red('VM {0} is unreachable\n')
    ret['comment'] += msg.format(vm)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def post_post_deploy_compute_node(target, output=True):
    '''post deployment hook for controller

    Apply the devhost-specific LXC compute-node SLS (when the target is
    registered as a devhost) and report success/failure in the comment.
    '''
    func_name = 'mc_cloud_lxc.post_post_deploy_compute_node {0}'.format(
        target)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    ret = result()
    nodetypes_reg = cli('mc_nodetypes.registry')
    slss, pref = [], 'makina-states.cloud.lxc.compute_node'
    if nodetypes_reg['is']['devhost']:
        slss.append('{0}.devhost'.format(pref))
    if slss:
        ret = __salt__['mc_api.apply_sls'](
            slss, **{'salt_target': target, 'ret': ret})
    msg = 'Post installation: {0}\n'
    if ret['result']:
        clr = green
        # FIX: user-visible status string was misspelled 'sucess'
        status = 'success'
    else:
        clr = red
        status = 'failure'
    ret['comment'] += clr(msg.format(status))
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def cn_sls_pillar(target, ttl=api.RUNNER_CACHE_TIME, output=False):
    '''limited cloud pillar to expose to a compute node'''
    func_name = 'mc_cloud_lxc.cn_sls_pillar {0}'.format(target)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))

    def _do(target):
        # collect only the image/network keys a compute node needs
        imgSettings = cli('mc_cloud_images.settings')
        lxcSettings = cli('mc_cloud_lxc.settings')
        imgSettingsData = {}
        for name, imageData in imgSettings['lxc']['images'].items():
            entry = {}
            for key in ('lxc_tarball', 'lxc_tarball_md5',
                        'lxc_tarball_name', 'lxc_tarball_ver'):
                entry[key] = imageData[key]
            imgSettingsData[name] = entry
        lxcSettingsData = {}
        for key in ('use_bridge', 'bridge', 'gateway',
                    'netmask_full', 'network', 'netmask'):
            lxcSettingsData[key] = lxcSettings['defaults'][key]
        # imgSettingsData = api.json_dump(imgSettingsData)
        # lxcSettingsData = api.json_dump(lxcSettingsData)
        return {'lxcSettings': lxcSettingsData,
                'imgSettings': imgSettingsData}

    # memoize per target so repeated runner calls stay cheap
    cache_key = 'mc_cloud_lxc.cn_sls_pillar_{0}'.format(target)
    pillar = memoize_cache(_do, [target], {}, cache_key, ttl)
    cret = result()
    cret['result'] = pillar
    salt_output(cret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return pillar
def report(targets, ret=None, refresh=False, output=True):
    '''Build a project report for the given targets.

    Compute-node targets are expanded with their hosted VMs; each
    reachable target contributes the output of ``mc_project.report``.
    '''
    # FIX: func_name was copy/pasted from register_configurations,
    # which mislabelled the timing log entries for this runner.
    func_name = 'mc_compute_node.report'
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    settings = cli('mc_cloud_compute_node.settings')
    if ret is None:
        ret = result()
    if refresh:
        cli('saltutil.refresh_pillar')
    sret = ''
    if not isinstance(targets, list):
        targets = targets.split(',')
    # expand compute nodes into their hosted vms (appending while
    # iterating is deliberate: appended vms are themselves re-examined)
    for target in targets:
        # if compute_node
        if target in settings['targets']:
            for vm in settings['targets'][target]['vms']:
                if vm not in targets:
                    targets.append(vm)
    for idx, target in enumerate(targets):
        # skip unreachable targets silently (best effort report)
        try:
            if not cli('test.ping', salt_target=target):
                continue
        except Exception:
            continue
        sret += '{0}'.format(
            cli('mc_project.report', salt_target=target))
    ret['result'] = sret
    salt_output(ret, __opts__, output=output, onlyret=True)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def sync_image_reference_containers(builder_ref, img, ret=None,
                                    template='ubuntu', snapshot=True,
                                    force=False):
    '''Snapshot container (copy to img & impersonate)

    Create the target container from ``template`` when its rootfs is
    missing, then rsync the builder reference rootfs into it and clean
    its LXC config.
    '''
    _s = __salt__
    # try to find the local img reference building counterpart
    # and sync it back to the reference lxc
    if ret is None:
        ret = saltapi.result()
    rootfs = '/var/lib/lxc/{0}/rootfs'.format(img)
    if not os.path.exists(rootfs):
        lxccreate = _s['cmd.run_all']('lxc-create -t {1} -n {0}'.format(
            img, template))
        if lxccreate['retcode'] != 0:
            ret['result'] = False
            ret['comment'] = (
                'creation container for {0} failed'.format(img))
            # FIX: bail out early -- the original fell through and tried
            # to rsync into / reconfigure a container that was never created
            return ret
    sync_container('/var/lib/lxc/{0}/rootfs'.format(builder_ref),
                   rootfs, ret, snapshot=snapshot, force=force)
    clean_lxc_config(img, start=False)
    return ret
def deploy(target, output=True, ret=None, hooks=True, pre=True, post=True):
    '''Prepare cloud controller configuration
    can also apply per virtualization type configuration'''
    __salt__['mc_cloud_compute_node.lazy_register_configuration'](target)
    func_name = 'mc_compute_node.deploy {0}'.format(target)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if ret is None:
        ret = result()
    ret['comment'] += green('Installing compute node configuration\n')
    if hooks and pre:
        run_vt_hook('pre_deploy_compute_node',
                    ret=ret, target=target, output=output)
    # ordered deployment pipeline; some historical steps were merged
    # into configure_prevt / configure_host for performance:
    #   configure_sshkeys, configure_grains -> configure_prevt
    #   configure_hostsfile, configure_firewall, configure_sslcerts,
    #   configure_reverse_proxy -> configure_host
    pipeline = (register_configuration,
                configure_prevt,
                install_vts,
                configure_network,
                configure_host)
    for task in pipeline:
        task(target, ret=ret, output=False)
        check_point(ret, __opts__, output=output)
    if hooks and post:
        run_vt_hook('post_deploy_compute_node',
                    ret=ret, target=target, output=output)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def register_configuration(target, ret=None, output=True):
    '''drop the compute node configuration

    Store the compute-node cloud pillar into the target's local
    registry and report whether the write round-tripped.
    '''
    func_name = 'mc_compute_node.register_configuration {0}'.format(target)
    if ret is None:
        ret = result()
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    settings = __salt__['mc_cloud_compute_node.cn_sls_pillar'](target)
    reg_ret = cli(
        'mc_macros.update_local_registry',
        'cloud_compute_node_settings',
        settings, registry_format='pack',
        salt_target=target)
    # the registry helper echoes back the stored keys on success
    marker = ('makina-states.local.'
              'cloud_compute_node_settings.cnSettings')
    if isinstance(reg_ret, dict) and marker in reg_ret:
        ret['result'] = True
        ret['comment'] += yellow('Configuration stored'
                                 ' on {0}\n'.format(target))
    else:
        ret['result'] = False
        ret['comment'] += red('Configuration failed to store'
                              ' on {0}\n'.format(target))
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def provision(vm, compute_node=None, vt=None, steps=None, ret=None,
              output=True):
    '''provision a vm

    compute_node
        where to act
    vt
        virtual type
    vm
        vm to spawn
    steps
        list or comma separated list of steps
        Default::

            ['spawn', 'hostsfile', 'sshkeys', 'grains',
             'initial_setup', 'initial_highstate']

    ::

        mastersalt-run -lall mc_cloud_vm.provision foo.domain.tld
    '''
    func_name = 'mc_cloud_vm.provision {0}'.format(vm)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    # FIX: get_vt was resolved twice (before and after get_compute_node);
    # a single resolution is sufficient and halves the lookup cost.
    vt = __salt__['mc_cloud_vm.get_vt'](vm, vt)
    compute_node = __salt__['mc_cloud_vm.get_compute_node'](vm, compute_node)
    if isinstance(steps, basestring):
        steps = steps.split(',')
    if steps is None:
        steps = ['register_configuration_on_cn',
                 'spawn',
                 'register_configuration',
                 'preprovision',
                 # 'sshkeys',
                 # 'hostsfile',
                 # 'grains',
                 # 'markers',
                 'initial_setup',
                 'initial_highstate']
    if ret is None:
        ret = result()
    # run each step through the generic step dispatcher, merging results
    for step in steps:
        cret = __salt__['mc_cloud_vm.step'](vm, step,
                                            compute_node=compute_node,
                                            vt=vt, output=False)
        merge_results(ret, cret)
    if ret['result']:
        ret['comment'] += green(
            '{0}/{1}/{2} deployed\n').format(compute_node, vt, vm)
    else:
        ret['comment'] += red(
            '{0}/{1}/{2} failed to deploy\n').format(compute_node, vt, vm)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def register_configuration(vm, compute_node=None, vt=None, ret=None,
                           output=True, salt_target=None):
    '''
    Register the configuration on the 'salt_target' node as a local registry

    salt_target is aimed to be the vm as default
    but can be any other reachable minion.

    Idea is that we copy this configuration on the compute node at first
    to provision the vm with the rights settings.
    '''
    compute_node = __salt__['mc_cloud_vm.get_compute_node'](vm, compute_node)
    func_name = 'mc_cloud_vm.register_configuration {0}'.format(vm)
    # when storing on a third-party minion, suffix the registry name
    # with the vm so several vm configs can coexist on one host
    if not salt_target:
        salt_target = vm
    suf = '_{0}'.format(vm) if salt_target != vm else ''
    if ret is None:
        ret = result()
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    settings = __salt__['mc_cloud_vm.vm_sls_pillar'](compute_node, vm)
    reg_ret = cli(
        'mc_macros.update_local_registry',
        'cloud_vm_settings{0}'.format(suf),
        settings, registry_format='pack',
        salt_target=salt_target)
    # the registry helper echoes back the stored keys on success
    marker = ('makina-states.local.'
              'cloud_vm_settings{0}.vmSettings'.format(suf))
    if isinstance(reg_ret, dict) and marker in reg_ret:
        ret['result'] = True
        ret['comment'] += yellow('VM Configuration stored'
                                 ' on {0}\n'.format(salt_target))
    else:
        ret['result'] = False
        ret['comment'] += red('VM Configuration failed to store'
                              ' on {0}\n'.format(salt_target))
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def _configure(what, target, ret, output):
    '''Apply the generic compute-node SLS ``<_GPREF>.<what>`` on target.'''
    __salt__['mc_cloud_compute_node.lazy_register_configuration'](target)
    func_name = 'mc_compute_node._configure {0} {1}'.format(what, target)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if ret is None:
        ret = result()
    ret['comment'] += yellow('Installing {1} on {0}\n'.format(target, what))
    sls = '{0}.{1}'.format(_GPREF, what)
    ret = __salt__['mc_api.apply_sls'](sls, salt_target=target, ret=ret)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def post_deploy_controller(output=True):
    '''Prepare cloud controller LXC configuration'''
    func_name = 'mc_cloud_lxc.post_deploy_controller'
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    ret = result()
    ret['comment'] = yellow('Installing controller lxc configuration\n')
    # apply the lxc controller postdeploy state
    pref = 'makina-states.cloud.lxc.controller'
    slss = ['{0}.postdeploy'.format(pref)]
    ret = __salt__['mc_api.apply_sls'](slss, ret=ret)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def vm_initial_highstate(vm, compute_node=None, vt=None, ret=None,
                         output=True):
    '''Run the initial highstate, this step will run only once and will
    further check for the existence of
    <saltroot>/makina-states/.initial_hs file

    compute_node
        where to act
    vm
        vm to run highstate on

    ::

        mastersalt-run -lall mc_cloud_vm.vm_initial_highstate foo.domain.tld
    '''
    __salt__['mc_cloud_vm.lazy_register_configuration'](
        vm, compute_node)
    compute_node = __salt__['mc_cloud_vm.get_compute_node'](vm, compute_node)
    if not ret:
        ret = result()
    pillar = __salt__['mc_cloud_vm.vm_sls_pillar'](compute_node, vm)
    vt = __salt__['mc_cloud_vm.get_vt'](vm, vt)
    # reach the vm through the compute node with an ssh ProxyCommand
    cmd = ("ssh -o\"ProxyCommand=ssh {target} nc -w300 {vm} 22\""
           " footarget {cloudSettings[root]}/makina-states/"
           "_scripts/boot-salt.sh "
           "--initial-highstate").format(
               vm=vm, target=compute_node,
               cloudSettings=pillar['cloudSettings'])
    # guard: the marker file tells us the highstate already ran once
    unless = ("ssh -o\"ProxyCommand=ssh {target} "
              "nc -w300 {vm} 22\" footarget "
              "test -e '/etc/makina-states/initial_highstate'").format(
                  vm=vm, target=compute_node,
                  cloudSettings=pillar['cloudSettings'])
    cret = cli('cmd.run_all', unless)
    if cret['retcode']:
        rcret = cli('cmd.run_all', cmd, use_vt=True,
                    output_loglevel='info')
        if not rcret['retcode']:
            # FIX: append to the comment instead of overwriting it --
            # plain '=' discarded comments accumulated by earlier steps,
            # unlike every other branch in this module
            ret['comment'] += (
                'Initial highstate done on {0}'.format(vm))
        else:
            ret['result'] = False
            ret['trace'] += rcret['stdout'] + '\n'
            ret['trace'] += rcret['stderr'] + '\n'
            ret['comment'] += (
                'Initial highstate failed on {0}\n'.format(vm))
    else:
        ret['comment'] += 'Initial highstate already done on {0}\n'.format(vm)
    salt_output(ret, __opts__, output=output)
    return ret
def install_vts(target, ret=None, output=True):
    '''install all virtual types to be ready to host vms'''
    func_name = 'mc_compute_node.install_vts {0}'.format(target)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if ret is None:
        ret = result()
    # delegate the actual installation to each vt's install hook
    ret = run_vt_hook('install_vt', ret=ret, target=target, output=output)
    if ret['result']:
        msg = '{0} is now ready to host vms\n'.format(target)
        ret['comment'] += yellow(msg)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def upgrade_vt(target, ret=None, output=True):
    '''Upgrade LXC hosts

    This will reboot all containers upon lxc upgrade
    Containers are marked as being rebooted, and unmarked
    as soon as this script unmark explicitly them to be done.
    '''
    func_name = 'mc_cloud_lxc.upgrade_vt {0}'.format(target)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if not ret:
        ret = result()
    ret['comment'] += yellow('Upgrading lxc on {0}\n'.format(target))
    # capture the lxc version before/after to detect a real upgrade
    version = cli('cmd.run', 'lxc-info --version', salt_target=target)
    # run the install SLS which should take care of upgrading
    for step in [configure_install_lxc]:
        try:
            step(target, ret=ret, output=False)
        except FailedStepError:
            ret['result'] = False
            ret['comment'] += red('Failed to upgrade lxc\n')
            return ret
    # after upgrading
    nversion = cli('cmd.run', 'lxc-info --version', salt_target=target)
    if nversion != version:
        # version changed: mark every running container for restart
        containers = cli('lxc.list', salt_target=target)
        reg = cli('mc_macros.update_local_registry', 'lxc_to_restart',
                  {'todo': containers.get('running', [])},
                  salt_target=target)
        ret['comment'] += red('Upgraded lxc\n')
    else:
        # no upgrade: resume any restarts left over from a previous run
        ret['comment'] += red('lxc was already at the last version\n')
        reg = cli('mc_macros.get_local_registry', 'lxc_to_restart',
                  salt_target=target)
    todo = reg.get('todo', [])
    done = []
    for lxc in todo:
        try:
            stopret = cli('lxc.stop', lxc, salt_target=target)
            if not stopret['result']:
                raise ValueError('wont stop')
            startret = cli('lxc.start', lxc, salt_target=target)
            if not startret['result']:
                raise ValueError('wont start')
            ret['comment'] += yellow('Rebooted {0}\n'.format(lxc))
            done.append(lxc)
        except Exception, ex:
            ret['result'] = False
            ret['comment'] += yellow(
                'lxc {0} failed to'
                ' reboot: {1}\n'.format(lxc, ex.message))
def post_deploy(target, ret=None, output=True):
    '''Prepare cloud controller configuration
    can also apply per virtualization type configuration'''
    func_name = 'mc_compute_node.post_deploy {0}'.format(target)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if ret is None:
        ret = result()
    run_vt_hook('pre_post_deploy_compute_node',
                ret=ret, target=target, output=output)
    # placeholder pipeline: no generic post-deploy steps at the moment,
    # everything happens in the pre/post vt hooks
    for step in []:
        step(target, ret=ret, output=False)
        check_point(ret, __opts__, output=output)
    run_vt_hook('post_post_deploy_compute_node',
                ret=ret, target=target, output=output)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def orchestrate(output=True, only=None, skip=None, ret=None, refresh=False): '''Parse saltify settings to saltify all targets output display output only specify explicitly which hosts to provision among all avalaible ones skip hosts to skip refresh refresh pillar ''' if skip is None: skip = [] if only is None: only = [] if ret is None: ret = result() if refresh: cli('saltutil.refresh_pillar') comment = '' settings = cli('mc_cloud_saltify.settings') saltified = ret['changes'].setdefault('saltified', []) saltified_error = ret['changes'].setdefault('saltified_errors', []) targets = [a for a in settings['targets']] targets = filter_compute_nodes(targets, skip, only) targets.sort() for idx, compute_node in enumerate(targets): try: cret = saltify(compute_node, output=False) if cret['result']: saltified.append(compute_node) else: raise SaltyficationError( 'Target {0} failed to saltify:\n{1}'.format( compute_node, cret['comment'])) except Exception, exc: trace = traceback.format_exc() comment += yellow( '\nSaltyfication failed for {0}: {1}'.format( compute_node, exc)) if not isinstance(exc, SaltyficationError): ret['trace'] += '\n'.format(trace) log.error(trace) saltified_error.append(compute_node)
def dns_conf(output=True, ret=None):
    '''Prepare cloud controller dns (BIND) server'''
    func_name = 'mc_cloud_controller.dns_conf'
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if ret is None:
        ret = result()
    ret['comment'] += green(
        'Installing cloud controller DNS configuration\n')
    # pre hook -> dnsconf SLS -> post hook, checkpointing in between
    run_vt_hook('pre_dns_conf_on_controller', ret=ret, output=output)
    __salt__['mc_api.apply_sls'](
        ['makina-states.cloud.generic.controller.dnsconf'],
        ret=ret, output=output)
    check_point(ret, __opts__, output=output)
    run_vt_hook('post_dns_conf_on_controller', ret=ret, output=output)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def _vm_configure(what, target, compute_node, vm, ret, output):
    '''Apply the LXC vm SLS ``makina-states.cloud.lxc.vm.<what>``
    on the given salt target for a vm hosted on compute_node.'''
    __salt__['mc_cloud_vm.lazy_register_configuration'](vm, compute_node)
    func_name = 'mc_cloud_lxc._vm_configure {0} {1} {2} {3}'.format(
        what, target, compute_node, vm)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if ret is None:
        ret = result()
    ret['comment'] += yellow(
        'LXC: Installing {2} on vm '
        '{0}/{1}\n'.format(compute_node, vm, what))
    sls = '{0}.{1}'.format('makina-states.cloud.lxc.vm', what)
    ret = __salt__['mc_api.apply_sls'](sls, salt_target=target, ret=ret)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def install_vt(target, output=True):
    '''install & configure lxc'''
    func_name = 'mc_cloud_lxc.install_vt {0}'.format(
        target)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    ret = result()
    ret['comment'] += yellow('Installing lxc on {0}\n'.format(target))
    # best effort: a failed sub-step does not abort the remaining ones
    for installer in (configure_grains,
                      configure_install_lxc,
                      configure_images):
        try:
            installer(target, ret=ret, output=False)
        except FailedStepError:
            pass
    __salt__['mc_cloud_lxc.sync_images'](target, output=False, ret=ret)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def sync_images(target, output=True, ret=None):
    '''sync images on target'''
    func_name = 'mc_cloud_lxc.sync_images {0}'.format(
        target)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if ret is None:
        ret = result()
    sync_ret = __salt__['mc_lxc.sync_images'](only=[target])
    if sync_ret['result']:
        ret['comment'] += yellow(
            'LXC: images synchronnised on {0}\n'.format(target))
    else:
        # propagate the failure details into our result
        merge_results(ret, sync_ret)
        ret['comment'] += yellow(
            'LXC: images failed to synchronnise on {0}\n'.format(target))
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def deploy(output=True, ret=None):
    '''Prepare cloud controller configuration
    can also apply per virtualization type configuration'''
    func_name = 'mc_cloud_controller.deploy'
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if ret is None:
        ret = result()
    ret['comment'] += green(
        'Installing cloud controller configuration files\n')
    # pre hook -> controller + saltify SLS -> post hook
    run_vt_hook('pre_deploy_controller', ret=ret, output=output)
    __salt__['mc_api.apply_sls'](
        ['makina-states.cloud.generic.controller',
         'makina-states.cloud.saltify'],
        ret=ret, output=output)
    check_point(ret, __opts__, output=output)
    run_vt_hook('post_deploy_controller', ret=ret, output=output)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def sync_container(origin, destination, ret=None, snapshot=True,
                   force=False):
    # Rsync one container rootfs onto another, optionally resetting
    # (snapshotting) the destination afterwards. No-op when either path
    # is missing or both trees are already at the same version.
    _s = __salt__
    if ret is None:
        ret = saltapi.result()
    if os.path.exists(origin) and os.path.exists(destination):
        if test_same_versions(origin, destination, force=force):
            # already in sync, nothing to do
            return ret
        cmd = ('rsync -aA --exclude=lock --delete '
               '{0}/ {1}/').format(origin, destination)
        cret = _s['cmd.run_all'](cmd)
        if cret['retcode']:
            ret['comment'] += (
                '\nRSYNC(local builder) failed {0} {1}'.format(
                    origin, destination))
            ret['result'] = False
            return ret
        if snapshot:
            cret = snapshot_container(destination)
            if cret['retcode']:
                ret['comment'] += (
                    '\nRSYNC(local builder) reset failed {0}'.format(
                        destination))
                ret['result'] = False
    # NOTE(review): collapsed source makes the nesting of this final
    # return ambiguous; placed at function level as the only reading
    # that returns on every path -- confirm against upstream.
    return ret
def post_provision(vm, compute_node=None, vt=None, ret=None, output=True):
    '''post provision a vm

    compute_node
        where to act
    vt
        virtual type
    vm
        vm to spawn
    steps
        list or comma separated list of steps
        Default::

            ['ping', 'post_provision_hook']

    ::

        mastersalt-run -lall mc_cloud_vm.post_provision foo.domain.tld
    '''
    func_name = 'mc_cloud_vm.post_provision {0}'.format(vm)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if ret is None:
        ret = result()
    vt = __salt__['mc_cloud_vm.get_vt'](vm, vt)
    compute_node = __salt__['mc_cloud_vm.get_compute_node'](vm, compute_node)
    # fixed post-provision pipeline, merged step by step
    for task in ('ping', 'post_provision_hook'):
        task_ret = __salt__['mc_cloud_vm.step'](vm, task,
                                                compute_node=compute_node,
                                                vt=vt, output=False)
        merge_results(ret, task_ret)
    if ret['result']:
        ret['comment'] += green(
            '{0}/{1}/{2} deployed\n').format(compute_node, vt, vm)
    else:
        ret['comment'] += red(
            '{0}/{1}/{2} failed to deploy\n').format(compute_node, vt, vm)
    salt_output(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def run_vt_hook(hook_name, ret=None, target=None, vts=None,
                output=True, *args, **kwargs):
    '''Run an hook for a special vt
    on a controller, or a compute node or a vm'''
    func_name = (
        'mc_cloud_controller.run_vt_hook '
        '{0} {1}').format(hook_name, target)
    __salt__['mc_api.time_log']('start {0}'.format(func_name))
    if target:
        kwargs['target'] = target
    if ret is None:
        ret = result()
    # resolve the vt list: per-target virt_types, or all controller vts
    if not vts:
        if target:
            settings = cli('mc_cloud_compute_node.settings')
            vts = settings['targets'][target]['virt_types']
        else:
            settings = cli('mc_cloud_controller.settings')
            vts = settings['vts']
    if isinstance(vts, basestring):
        vts = [vts]
    for vt in vts:
        hook_id = 'mc_cloud_{0}.{1}'.format(vt, hook_name)
        if hook_id not in __salt__:
            # this vt does not implement the hook; nothing to run
            continue
        ret['comment'] += (
            green('\n --> ') + blue(hook_id) + green(' hook\n'))
        kwargs['output'] = False
        hook_ret = __salt__[hook_id](*args, **kwargs)
        merge_results(ret, hook_ret)
    check_point(ret, __opts__, output=output)
    __salt__['mc_api.time_log']('end {0}'.format(func_name))
    return ret
def apply_sls_(func, slss, salt_output_t='highstate', status_msg=None,
               sls_status_msg=None, ret=None, output=False, *a, **kwargs):
    # Apply a list of SLS through the given salt function, accumulating
    # filtered state output and per-sls success/failure statuses.
    local_target = __grains__.get('id', __opts__.get('id', 'local'))
    # salt_target=None means "run locally"
    target = salt_target = kwargs.pop('salt_target', None)
    if target is None:
        target = local_target
    if ret is None:
        ret = result()
    if isinstance(slss, basestring):
        slss = [slss]
    sls_kw = kwargs.pop('sls_kw', {})
    statuses = []
    salt_ok, salt_ko = 'success', 'failed'
    for sls in slss:
        cret = None
        try:
            cliret = cli(func, sls, salt_target=salt_target, *a, **sls_kw)
            # keep only the human-relevant part of the state return
            cret = filter_state_return(cliret, target=target,
                                       output=salt_output_t)
            # raises SaltExit on failed states
            valid_state_return(cliret, sls=sls)
            ret['output'] += cret
            statuses.append((salt_ok, sls))
        except SaltExit, exc:
            trace = traceback.format_exc()
            ret['result'] = False
            ret['output'] += '- {0}\n{1}\n'.format(yellow(sls), exc)
            ret['trace'] += '{0}\n'.format(trace)
            statuses.append((salt_ko, sls))
            # include whatever partial output we managed to filter
            if cret:
                ret['trace'] += '{0}\n'.format(pformat(cret))
                ret['output'] += '{0}\n'.format(cret)
# NOTE(review): ``saltify`` below is flattened/mangled in this chunk and
# appears truncated: the outer ``try:`` opened right after the docstring
# has no visible ``except``/``finally`` clause before the next top-level
# def, so it cannot be safely re-indented by hand — restore formatting
# from the canonical sources instead.  Left byte-identical.
#
# Purpose (from the visible code): provision ``name`` through the
# salt-cloud ``saltify`` driver using ``mc_cloud_saltify`` settings,
# then, if the host was never marked ``saltified``, run an initial
# highstate over ssh (boot-salt.sh --initial-highstate, optionally via
# an ssh gateway ProxyCommand) and persist the ``saltified`` flag with
# ``mc_cloud_compute_node.set_conf_for_target``.
def saltify(name, output=True, ret=None): '''Saltify a specific target''' if not ret: ret = result() try: already_exists = __salt__['mc_cloud_controller.exists'](name) data = None if already_exists: success = green('{0} is already saltified'.format(name)) else: try: data = cli('mc_cloud_saltify.settings_for_target', name) if not isinstance(data, dict): raise SaltyficationError(red('{0}'.format(data))) except KeyError: data = None if data is None: raise SaltyficationError( red('Saltify target {0} is not configured'.format(name))) else: success = green('{0} is saltified') kwargs = {'minion': {'master': data['master'], 'master_port': data['master_port']}} for var in [ "ssh_username", "ssh_keyfile", "keep_tmp", "gateway", "sudo", "password", "script_args", "ssh_host", "sudo_password", ]: if data.get(var): kwargs[var] = data[var] try: info = __salt__['cloud.profile']( data['profile'], [name], vm_overrides=kwargs) except Exception, exc: trace = traceback.format_exc() ret['trace'] = trace raise FailedStepError(red('{0}'.format(exc))) ret = process_cloud_return( name, info, driver='saltify', ret=ret) if ret['result']: ret['comment'] = success if not output: ret['changes'] = {} check_point(ret, __opts__) # once saltified, also be sure that this host had #a time to accomplish it's setup through a full initial # highstate if not cli('mc_cloud_compute_node.get_conf_for_target', name, 'saltified'): if data is None: data = cli('mc_cloud_saltify.settings_for_target', name) csettings = cli('mc_cloud.settings') proxycmd = '' if data.get('ssh_gateway', None): args = '-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null' args += '-oControlPath=none' if 'ssh_key' in data: args += ' -i {0}'.format(data['ssh_key']) if 'ssh_port' in data: args += ' -p {0}'.format(data['ssh_port']) proxycmd = '-o\"ProxyCommand=ssh {1} {2} nc -w300 {1} 22\"'.format( data['ssh_gateway'], name, args ) cmd = ( 'ssh {2} {0} {1}/makina-states/_scripts/boot-salt.sh ' '--initial-highstate' ).format(name, 
csettings['root'], proxycmd) cmdret = cli('cmd.run_all', cmd) if cmdret['retcode']: ret['result'] = False ret['trace'] += 'Using cmd: \'{0}\''.format(cmd) ret['trace'] += '{0}\n'.format(cmdret['stdout']) ret['trace'] += '{0}\n'.format(cmdret['stderr']) ret['comment'] += red( 'SALTIFY: Error in highstate for {0}'.format(name)) check_point(ret, __opts__) # ok, marking initial highstate done cli('mc_cloud_compute_node.set_conf_for_target', name, 'saltified', True)
def vm_spawn(vm, compute_node=None, vt='lxc', ret=None, output=True, force=False): '''spawn the vm :: mastersalt-run -lall mc_cloud_lxc.vm_spawn foo.domain.tld ''' func_name = 'mc_cloud_lxc.vm_spawn {0}'.format(vm) __salt__['mc_api.time_log']('start {0}'.format(func_name)) if not ret: ret = result() compute_node = __salt__['mc_cloud_vm.get_compute_node'](vm, compute_node) reg = cli('mc_macros.get_local_registry', 'mc_cloud_lxc_containers') provisioned_containers = reg.setdefault('provisioned_containers', OrderedDict()) containers = provisioned_containers.setdefault(compute_node, []) reg = __salt__['mc_cloud_vm.lazy_register_configuration_on_cn']( vm, compute_node) pillar = __salt__['mc_cloud_vm.vm_sls_pillar'](compute_node, vm) target = compute_node data = pillar['vtVmData'] cloudSettings = pillar['cloudSettings'] profile = data.get( 'profile', 'ms-{0}-dir-sratch'.format(target)) profile_data = { 'target': target, 'dnsservers': data.get("dnsservers", ["8.8.8.8", "4.4.4.4"]), 'minion': { 'master': data['master'], 'master_port': data['master_port'], } } for var in ["from_container", "snapshot", "image", "additional_ips", "gateway", "bridge", "mac", "lxc_conf_unset", "ssh_gateway", "ssh_gateway_user", "ssh_gateway_port", "ssh_gateway_key", "ip", "netmask", "size", "backing", "vgname", "script", "lvname", "script_args", "dnsserver", "ssh_username", "password", "lxc_conf"]: val = data.get(var) if val: if var in ['script_args']: if '--salt-cloud-dir' not in val: val = '{0} {1}'.format( val, '--salt-cloud-dir {0}') profile_data[var] = val marker = "{cloudSettings[prefix]}/pki/master/minions/{vm}".format( cloudSettings=cloudSettings, vm=vm) lret = cli('cmd.run_all', 'test -e {0}'.format(marker)) lret['retcode'] = 1 # verify if VM is already reachable if already marked as provisioned # this add a 10 seconds overhead upon VM creation # but enable us from crashing a vm that was loosed from local # registry and where reprovisionning can be harmful # As we are pinguing it, we 
are managing it, we will not # enforce spawning here ! try: ping = False if vm in containers: ping = cli('test.ping', salt_timeout=10, salt_target=vm) except Exception: ping = False if force or (lret['retcode'] and not ping): try: # XXX: Code to use with salt-cloud # cret = __salt__['cloud.profile']( # profile, [vm], vm_overrides=profile_data) # if vm not in cret: # cret['result'] = False # cret = cret[vm]['runner_return'] # XXX: using the lxc runner which is now faster and nicer. cret = __salt__['lxc.cloud_init']( [vm], host=compute_node, **profile_data) if not cret['result']: # convert to regular dict for pformat errors = dict(cret.pop('errors', {})) hosts = {} for h in errors: hosts[h] = dict(errors[h]) cret['errors'] = hosts ret['trace'] += 'FAILURE ON LXC {0}:\n{1}\n'.format( vm, pformat(dict(cret))) merge_results(ret, cret) ret['result'] = False else: ret['comment'] += '{0} provisioned\n'.format(vm) except Exception, ex: ret['trace'] += '{0}\n'.format(traceback.format_exc()) ret['result'] = False ret['comment'] += red(ex.message)