def map_minions(self,get_only_alive=False):
    """
    Builds a recursive map of the minions currently assigned to this
    overlord.

    get_only_alive -- when True, only minions that answer test.ping()
    are included; when False, every signed cert known to certmaster is
    treated as a minion.

    Returns a nested dict: {minion_name: {sub_minion: {...}, ...}};
    leaves are empty dicts.
    """
    maphash = {}
    current_minions = []
    if get_only_alive:
        # ping everything; ping_results maps minion name -> 1 if it answered
        ping_results = fc.Overlord("*").test.ping()
        for minion in ping_results.keys():
            if ping_results[minion] == 1: #if minion is alive
                current_minions.append(minion) #add it to the list
    else:
        # no liveness filter: take every signed certificate as a minion
        cm = certmaster.CertMaster()
        current_minions = cm.get_signed_certs()
    for current_minion in current_minions:
        # NOTE(review): this is a substring test against the local hostname,
        # not an equality test -- a minion named "foo" matches host
        # "foo.example.com".  Presumably intentional (cert name vs FQDN),
        # but worth confirming.
        if current_minion in utils.get_hostname():
            maphash[current_minion] = {} #prevent infinite recursion
        else:
            # recurse: ask the minion (itself an overlord) for its own map
            next_hop = fc.Overlord(current_minion)
            mapresults = next_hop.overlord.map_minions()[current_minion]
            if not utils.is_error(mapresults):
                maphash[current_minion] = mapresults
            else:
                # remote error: record the minion with an empty subtree
                maphash[current_minion] = {}
    return maphash
def map_minions(self, get_only_alive=False):
    """
    Builds a recursive map of the minions currently assigned to this
    overlord.

    get_only_alive -- when True, include only minions that answer
    test.ping(); otherwise include every signed cert from certmaster.

    Returns a nested dict of minion names; leaves are empty dicts.
    """
    minion_map = {}
    if get_only_alive:
        # keep only the minions that answered the ping with 1
        ping_results = fc.Overlord("*").test.ping()
        targets = [m for m in ping_results.keys() if ping_results[m] == 1]
    else:
        # no liveness filter: every signed certificate counts
        targets = certmaster.CertMaster().get_signed_certs()
    for target in targets:
        if target in utils.get_hostname():
            # looks like ourselves -- stop here to prevent infinite recursion
            minion_map[target] = {}
            continue
        # recurse into the sub-overlord and graft its subtree in
        subtree = fc.Overlord(target).overlord.map_minions()[target]
        if utils.is_error(subtree):
            minion_map[target] = {}
        else:
            minion_map[target] = subtree
    return minion_map
def build_map(self): fc = func_client.Overlord("*") fc.timeout = self.options.timeout minion_hash = fc.overlord.map_minions(self.options.only_alive==True) for minion in minion_hash.keys(): #clean hash of any top-level errors if utils.is_error(minion_hash[minion]): minion_hash[minion] = {} if self.options.verbose: print "- built the following map:" print minion_hash if self.options.append: try: oldmap = file(DEFAULT_TREE, 'r').read() old_hash = yaml.load(oldmap).next() oldmap.close() except e: sys.stderr.write("ERROR: old map could not be read, append failed\n") sys.exit(-1) merged_map = {} merged_map.update(old_hash) merged_map.update(minion_hash) if self.options.verbose: print "- appended new map to the following map:" print old_hash print " resulting in:" print merged_map minion_hash = merged_map if self.options.verbose: print "- writing to %s" % DEFAULT_TREE mapfile = file(DEFAULT_TREE, 'w') data = yaml.dump(minion_hash) mapfile.write(data)
def build_map(self): minion_hash = func_client.Overlord("*").overlord.map_minions( self.options.only_alive == True) for minion in minion_hash.keys(): #clean hash of any top-level errors if utils.is_error(minion_hash[minion]): minion_hash[minion] = {} if self.options.verbose: print "- built the following map:" print minion_hash if self.options.append: try: oldmap = file(DEFAULT_TREE, 'r').read() old_hash = yaml.load(oldmap).next() oldmap.close() except e: sys.stderr.write( "ERROR: old map could not be read, append failed\n") sys.exit(-1) merged_map = {} merged_map.update(old_hash) merged_map.update(minion_hash) if self.options.verbose: print "- appended new map to the following map:" print old_hash print " resulting in:" print merged_map minion_hash = merged_map if self.options.verbose: print "- writing to %s" % DEFAULT_TREE mapfile = file(DEFAULT_TREE, 'w') data = yaml.dump(minion_hash) mapfile.write(data)
def run(self, args): p = optparse.OptionParser() p.add_option("-v", "--verbose", dest="verbose", action="store_true", help="provide extra output") p.add_option("-s", "--server-spec", dest="server_spec", default="*", help="run against specific servers, default: '*'") p.add_option( "-m", "--memory", dest="memory", default="512", help="the memory requirements in megabytes, default: '512'") p.add_option("-a", "--arch", dest="arch", default="i386", help="the architecture requirements, default: 'i386'") (options, args) = p.parse_args(args) self.options = options # convert the memory and storage to integers for later comparisons memory = int(options.memory) arch = options.arch # see what hosts have enough RAM avail_hosts = {} host_freemem = func_client.Client(options.server_spec).virt.freemem() for (host, freemem) in host_freemem.iteritems(): if utils.is_error(freemem): print "-- connection refused: %s" % host continue # Take an additional 256M off the freemem to keep # Domain-0 stable (shrinking it to 256M can cause # it to crash under load) if (freemem - 256) >= memory: avail_hosts[host] = {'memory': freemem} # Default the dest_host to nothing dest_host = None # see what hosts have the right architecture arch_hosts = {} host_arch = func_client.Client( options.server_spec).command.run('uname -i') for (host, output) in host_arch.iteritems(): if utils.is_error(output): print "-- connection refused: %s" % host continue host_arch = output[1].rstrip() # If the host_arch is 64 bit, allow 32 bit machines on it if host_arch == arch or (host_arch == "x86_64" and arch == "i386"): arch_hosts[host] = host if len(avail_hosts) > 0: # Find the host that is the closest memory match # and matching architecture for (host, attrs) in avail_hosts.iteritems(): if arch_hosts.has_key(host): if not dest_host: dest_host = [host, attrs['memory']] else: if attrs['memory'] < dest_host[1]: # Use the better match dest_host = [host, attrs['memory']] return dest_host
def run(self,args): p = optparse.OptionParser() p.add_option("-v", "--verbose", dest="verbose", action="store_true", help="provide extra output") p.add_option("-s", "--server-spec", dest="server_spec", default="*", help="run against specific servers, default: '*'") p.add_option("-m", "--memory", dest="memory", default="512", help="the memory requirements in megabytes, default: '512'") p.add_option("-d", "--disk", dest="disk", default="20", help="the disk storage requirements in gigabytes, default: '20'") (options, args) = p.parse_args(args) self.options = options # convert the memory and storage to integers for later comparisons memory = int(options.memory) storage = int(options.disk) # see what hosts have enough RAM avail_hosts = {} host_freemem = func_client.Client(options.server_spec).virt.freemem() for (host, freemem) in host_freemem.iteritems(): if utils.is_error(freemem): print "-- connection refused: %s" % host continue # Take an additional 256M off the freemem to keep # Domain-0 stable (shrinking it to 256M can cause # it to crash under load) if (freemem-256) >= memory: avail_hosts[host] = {'memory': freemem} # figure out which of these machines have enough storage for avail_host in avail_hosts.keys(): # see what hosts have the required storage host_volume_groups = func_client.Client(avail_host).storage.vgs() for (host, vol_groups) in host_volume_groups.iteritems(): if utils.is_error(vol_groups): print "-- connection refused: %s" % host continue avail_vol_groups = [] for vol_group, attrs in vol_groups.iteritems(): free_space = int(float(attrs['free'][:-1])) if free_space >= storage: avail_vol_groups.append((vol_group, free_space)) if len(avail_vol_groups) > 0: avail_hosts[host]['space'] = dict(avail_vol_groups) else: avail_hosts.pop(host) # Default the dest_host to nothing dest_host = None if len(avail_hosts) > 0: # Find the host that is the closest memory match for (host, attrs) in avail_hosts.iteritems(): # Use a random volume group vol_group = 
random.choice(attrs['space'].keys()) if not dest_host: dest_host = [host, vol_group, attrs['memory']] else: if attrs['memory'] < dest_host[2]: # Use the better match dest_host = [host, vol_group, attrs['memory']] return dest_host
#!/usr/bin/python # report on any drives with SMART issues # these are likely going to kick the bucket # (C) Michael DeHaan, 2007 <*****@*****.**> # =============================================== import func.overlord.client as fc import func.utils as utils info = fc.Overlord("*").smart.info() failures = 0 for (host,details) in info.iteritems(): if utils.is_error(details): print "%s had an error : %s" % (host,details[1:3]) break (rc, list_of_output) = details if rc != 0: print "============================================" print "Host %s may have problems" % host print "\n".join(list_of_output[3:]) failures = failures + 1 print "\n%s systems reported problems" % failures
def run(self,args): p = optparse.OptionParser() p.add_option("-v", "--verbose", dest="verbose", action="store_true", help="provide extra output") p.add_option("-s", "--server-spec", dest="server_spec", default="*", help="run against specific servers, default: '*'") p.add_option("-m", "--methods", dest="methods", default="inventory", help="run inventory only on certain function names, default: 'inventory'") p.add_option("-M", "--modules", dest="modules", default="all", help="run inventory only on certain module names, default: 'all'") p.add_option("-t", "--tree", dest="tree", default=DEFAULT_TREE, help="output results tree here, default: %s" % DEFAULT_TREE) p.add_option("-n", "--no-git", dest="nogit", action="store_true", help="disable useful change tracking features") p.add_option("-x", "--xmlrpc", dest="xmlrpc", help="output data using XMLRPC format", action="store_true") p.add_option("-j", "--json", dest="json", help="output data using JSON", action="store_true") (options, args) = p.parse_args(args) self.options = options filtered_module_list = options.modules.split(",") filtered_function_list = options.methods.split(",") self.git_setup(options) # see what modules each host provides (as well as what hosts we have) host_methods = func_client.Overlord(options.server_spec).system.list_methods() # call all remote info methods and handle them if options.verbose: print "- scanning ..." 
# for (host, modules) in host_modules.iteritems(): for (host, methods) in host_methods.iteritems(): if utils.is_error(methods): print "-- connection refused: %s" % host break for each_method in methods: #if type(each_method) == int: # if self.options.verbose: # print "-- connection refused: %s" % host # break tokens = each_method.split(".") module_name = ".".join(tokens[:-1]) method_name = tokens[-1] if not "all" in filtered_module_list and not module_name in filtered_module_list: continue if not "all" in filtered_function_list and not method_name in filtered_function_list: continue overlord = func_client.Overlord(host,noglobs=True) # ,noglobs=True) results = getattr(getattr(overlord,module_name),method_name)() if self.options.verbose: print "-- %s: running: %s %s" % (host, module_name, method_name) self.save_results(options, host, module_name, method_name, results) self.git_update(options) return 1
def run(self,args): p = optparse.OptionParser() p.add_option("-v", "--verbose", dest="verbose", action="store_true", help="provide extra output") p.add_option("-s", "--server-spec", dest="server_spec", default="*", help="run against specific servers, default: '*'") p.add_option("-m", "--memory", dest="memory", default="512", help="the memory requirements in megabytes, default: '512'") p.add_option("-a", "--arch", dest="arch", default="i386", help="the architecture requirements, default: 'i386'") (options, args) = p.parse_args(args) self.options = options # convert the memory and storage to integers for later comparisons memory = int(options.memory) arch = options.arch # see what hosts have enough RAM avail_hosts = {} host_freemem = func_client.Client(options.server_spec).virt.freemem() for (host, freemem) in host_freemem.iteritems(): if utils.is_error(freemem): print "-- connection refused: %s" % host continue # Take an additional 256M off the freemem to keep # Domain-0 stable (shrinking it to 256M can cause # it to crash under load) if (freemem-256) >= memory: avail_hosts[host] = {'memory': freemem} # Default the dest_host to nothing dest_host = None # see what hosts have the right architecture arch_hosts = {} host_arch = func_client.Client(options.server_spec).command.run('uname -i') for (host, output) in host_arch.iteritems(): if utils.is_error(output): print "-- connection refused: %s" % host continue host_arch = output[1].rstrip() # If the host_arch is 64 bit, allow 32 bit machines on it if host_arch == arch or (host_arch == "x86_64" and arch == "i386"): arch_hosts[host] = host if len(avail_hosts) > 0: # Find the host that is the closest memory match # and matching architecture for (host, attrs) in avail_hosts.iteritems(): if arch_hosts.has_key(host): if not dest_host: dest_host = [host, attrs['memory']] else: if attrs['memory'] < dest_host[1]: # Use the better match dest_host = [host, attrs['memory']] return dest_host
#!/usr/bin/python # find all parts that need to be recalled # (C) Michael DeHaan, 2007 <*****@*****.**> # =============================================== import func.overlord.client as fc import func.utils as utils bad = open("./part_data.txt").read().split() info = fc.Overlord("*").hardware.hal_info() for (host, details) in info.iteritems(): if utils.is_error(details): print "%s had an error : %s" % (host, details[1:3]) break for (device, full_output) in details.iteritems(): for bad_value in bad: if full_output.find(bad_value) != -1: print "%s has flagged part: %s, matched %s" % (host, device, bad_value) break
class DelegationModule(func_module.FuncModule):
    """
    Minion-side module that forwards (delegates) func calls down a tree
    of sub-overlords and aggregates the results.
    """
    version = "0.0.1"
    api_version = "0.0.1"
    description = "Minion-side module to support delegation on sub-Overlords."

    def run(self, module, method, args, delegation_list, async, nforks):
        """
        Delegates commands down the path of delegation supplied as an
        argument.

        module/method/args -- the remote call to make on each final minion
        delegation_list    -- paths (minion chains) to deliver the call along
        async              -- when true, calls are issued asynchronously and
                              polled below until every job finishes
        nforks             -- parallelism passed through to each Overlord

        Returns a dict of {minion: result} aggregated from all paths.
        NOTE: "async" is a reserved word in Python 3; this is Python 2 code.
        """
        result_dict = {}
        job_id_list = []
        #separate list passed to us into minions we can call directly and
        #further delegation paths
        (single_paths, grouped_paths) = dtools.group_paths(delegation_list)
        #run delegated calls: one sub-overlord per group, which forwards
        #the call further down its own path list
        for group in grouped_paths.keys():
            overlord = fc.Overlord(group, async=async, nforks=nforks)
            path_list = grouped_paths[group]
            delegation_results = overlord.delegation.run(module,
                                                         method,
                                                         args,
                                                         path_list,
                                                         async,
                                                         nforks)
            if async:
                # remember [overlord, job_id, group, is_delegated] to poll later
                job_id_list.append([overlord, delegation_results, group, True])
            else:
                #These are delegated calls, so we need to strip away the
                #hash that surrounds the results
                if utils.is_error(delegation_results[group]):
                    result_dict.update(delegation_results)
                else:
                    result_dict.update(delegation_results[group])
        #run direct calls (minions we can reach without further delegation)
        for minion in single_paths:
            overlord = fc.Overlord(minion, async=async, nforks=nforks)
            overlord_module = getattr(overlord, module)
            results = getattr(overlord_module, method)(*args[:])
            if async:
                # False marks this entry as a direct (non-delegated) job
                job_id_list.append([overlord, results, minion, False])
            else:
                result_dict.update(results)
        #poll async calls until every outstanding job has completed
        while len(job_id_list) > 0:
            for job in job_id_list:
                # job = [overlord, job_id, target_name, is_delegated]
                (return_code, async_results) = job[0].job_status(job[1])
                if return_code == jobthing.JOB_ID_RUNNING:
                    pass #it's still going, ignore it this cycle
                elif return_code == jobthing.JOB_ID_PARTIAL:
                    pass #yep, it's still rolling
                elif return_code == jobthing.JOB_ID_REMOTE_ERROR:
                    result_dict.update(async_results)
                    job_id_list.remove(job)
                else:
                    #it's done or it's had an error, pass it up
                    # NOTE(review): DIRECT/DELEGATED are defined outside this
                    # view; the append calls above use False/True respectively,
                    # so presumably DIRECT == False and DELEGATED == True --
                    # confirm against the module-level constants.
                    if job[3] == DIRECT:
                        #this is a direct call, so we only need to
                        #update the hash with the pertinent results
                        results = async_results
                    elif job[3] == DELEGATED:
                        #this is a delegated call, so we need to strip
                        #away the nesting hash
                        results = async_results[job[2]]
                    else:
                        #and this code should never be reached
                        results = {}
                    result_dict.update(results)
                    # NOTE(review): removing from job_id_list while iterating
                    # it skips the following entry this pass; the while loop
                    # re-scans, so jobs are still drained eventually.
                    job_id_list.remove(job)
            time.sleep(0.1) #pause a bit so that we don't flood our minions
        return result_dict