def main(): """ <Purpose> Entry point into the program. Reads the hosts that need installing from file and then starts the threads that will take care of downloading and installing seattle. Then waits for all threads to finish. This takes a while as an RSA key needs to be generated during each install. <Arguments> None <Exceptions> Possible exception when launching new threads. <Side Effects> None. <Returns> None. """ # start the timeout monitor thread deploy_threading.init() # the fn of the file that contains the list of nodes we'll be using nodelist_fn = '' # did we get a parameter passed in? if so that's our fn if len(sys.argv) > 1: nodelist_fn = sys.argv[1] print 'Using '+nodelist_fn+' filename to read in hostnames' else: print 'Using default missing.list filename to read in hostnames' # get hosts from file #if nodelist_fn: # hosts = get_remote_hosts_from_file(nodelist_fn) #else: # use default fn # hosts = get_remote_hosts_from_file() # '128.208.1.130', 128.208.1.217 # or manually type in hosts here hosts = [ ] # if we have hostnames if hosts: # BEGIN func_handle = parallelize_repy.parallelize_initfunction(hosts, worker, 10) size = str(len(hosts)) while not parallelize_repy.parallelize_isfunctionfinished(func_handle): results_dict = parallelize_repy.parallelize_getresults(func_handle) print str(len(results_dict['aborted']))+' aborted, '+str(len(results_dict['exception']))+\ ' exceptioned, '+str(len(results_dict['returned']))+' finished of '+size+' total.' time.sleep(5) deploy_threading.destroy()
def start_thread(callfunc, arguments, max_threads):
  """
  <Purpose>
    Starts the worker threads and calls init() for this module. This'll
    allow for the timeout thread to function properly. Intended to be
    called only from connect_and_do_work.

  <Arguments>
    callfunc: the function to call.
    arguments: the list of tuples where each tuple is (username, host)
      to connect to.
    max_threads: the max # of threads to start.

  <Exceptions>
    None.

  <Side Effects>
    Stores the parallelize function handle and host bookkeeping
    ('func_handle', 'hosts_left', 'total_hosts') in the module-level
    thread_communications dict.

  <Returns>
    None.  NOTE(review): the original docstring claimed a Boolean return
    (True on success, False on failure), but there is no return statement
    in this function.
  """
  init()
  func_handle = parallelize_repy.parallelize_initfunction(arguments, callfunc, max_threads)
  thread_communications['func_handle'] = func_handle
  # Keep a copy of the argument list so the timeout thread can track
  # which hosts are still outstanding without mutating the caller's list.
  thread_communications['hosts_left'] = arguments[:]
  thread_communications['total_hosts'] = len(arguments)
def start_thread(callfunc, arguments, max_threads):
  """
  <Purpose>
    Initializes this module (so the timeout thread can function) and
    kicks off the parallelized worker threads over the given arguments.
    Intended to be called only from connect_and_do_work.

  <Arguments>
    callfunc: the function each worker thread will call.
    arguments: list of (username, host) tuples to connect to.
    max_threads: upper bound on the number of worker threads.

  <Exceptions>
    None.

  <Side Effects>
    Records the parallelize handle and host bookkeeping in the
    module-level thread_communications dict.

  <Returns>
    None.
  """
  # NOTE(review): this is a duplicate of an earlier start_thread
  # definition in this file; Python silently keeps whichever is
  # defined last.  Consider deleting one copy.
  init()

  handle = parallelize_repy.parallelize_initfunction(
      arguments, callfunc, max_threads)

  thread_communications['func_handle'] = handle
  thread_communications['total_hosts'] = len(arguments)
  # Shallow copy so later mutation of the bookkeeping list does not
  # touch the caller's list.
  thread_communications['hosts_left'] = arguments[:]
# keeps track of ip->hostname and hostname->ip mappings dns_dict = {} # this'll keep track of the already checked nodes dns_dict['flagged'] = [] # keeps track of the rerversed dns entries advertise_reversedns = [] # loopbacks, network, eg: 192.* networkiplist = [] # start multiple threads to reverse lookup all the hostnames/ips as our iplist2.list file # might have mixed forms (1.1.1.1 <-> bla.foo.bar, and we have to match those up) func_handle = parallelize_repy.parallelize_initfunction(advertised_nodes_list, deploy_helper.dnslookup, 15) while not parallelize_repy.parallelize_isfunctionfinished(func_handle): time.sleep(1) # Function is done results_dict = parallelize_repy.parallelize_getresults(func_handle) for each_key in results_dict.keys(): print results_dict[each_key] for each_tuple in results_dict['returned']: iphostname = each_tuple[0] reverse_iphostname = each_tuple[1] print str(iphostname)+' resolves to '+reverse_iphostname if iphostname == reverse_iphostname: