import sys
import time

import deploy_threading
import deploy_helper
import parallelize_repy


def main():
  """
  <Purpose>
    Entry point into the program. Reads the hosts that need installing from
    file and then starts the threads that will take care of downloading and
    installing seattle. Then waits for all threads to finish. This takes a
    while as an RSA key needs to be generated during each install.

  <Arguments>
    None.

  <Exceptions>
    Possible exception when launching new threads.

  <Side Effects>
    None.

  <Returns>
    None.
  """
  # start the timeout monitor thread
  deploy_threading.init()

  # the fn of the file that contains the list of nodes we'll be using
  nodelist_fn = ''

  # did we get a parameter passed in? if so that's our fn
  if len(sys.argv) > 1:
    nodelist_fn = sys.argv[1]
    print 'Using '+nodelist_fn+' filename to read in hostnames'
  else:
    print 'Using default missing.list filename to read in hostnames'

  # get hosts from file
  #if nodelist_fn:
  #  hosts = get_remote_hosts_from_file(nodelist_fn)
  #else:
  #  # use default fn
  #  hosts = get_remote_hosts_from_file()

  # '128.208.1.130', '128.208.1.217'
  # or manually type in hosts here
  hosts = []

  # if we have hostnames
  if hosts:
    # BEGIN
    func_handle = parallelize_repy.parallelize_initfunction(hosts, worker, 10)
    size = str(len(hosts))

    while not parallelize_repy.parallelize_isfunctionfinished(func_handle):
      results_dict = parallelize_repy.parallelize_getresults(func_handle)
      print str(len(results_dict['aborted']))+' aborted, '+str(len(results_dict['exception']))+\
          ' exceptioned, '+str(len(results_dict['returned']))+' finished of '+size+' total.'
      time.sleep(5)

  deploy_threading.destroy()
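# The two helpers below are illustrative stand-ins only: example_worker and
# example_get_hosts_from_file are hypothetical names showing the shape of the
# worker() callable and the get_remote_hosts_from_file() helper that main()
# relies on (the real versions are defined elsewhere in this codebase).  The
# one-host-per-line file format is an assumption.
def example_get_hosts_from_file(fn='missing.list'):
  # read hostnames/IPs, one per line, skipping blank lines and '#' comments
  hosts = []
  for line in open(fn):
    line = line.strip()
    if line and not line.startswith('#'):
      hosts.append(line)
  return hosts


def example_worker(hostname):
  # parallelize_initfunction calls the worker once per host; the real worker
  # downloads and installs seattle on that host (see main()'s docstring)
  print 'would install seattle on '+hostname
  return hostname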
def threading_currentlyrunning():
  """
  <Purpose>
    Helper method for figuring out if the function is still running or not.

  <Arguments>
    None.

  <Exceptions>
    None.

  <Side Effects>
    None.

  <Returns>
    Boolean. True if the parallelized function is still running, False once
    it has finished.
  """
  # grab the function handle and then the results of that function
  func_handle = thread_communications['func_handle']
  function_finished = parallelize_repy.parallelize_isfunctionfinished(func_handle)
  results_dict = parallelize_repy.parallelize_getresults(func_handle)
  size = thread_communications['total_hosts']

  print str(len(results_dict['aborted']))+' aborted, '+str(len(results_dict['exception']))+\
      ' exceptioned, '+str(len(results_dict['returned']))+' finished of '+str(size)+' total.'

  return not function_finished
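# A minimal usage sketch, not part of the original module: it assumes the
# module-level thread_communications dict read by threading_currentlyrunning()
# above already exists, and it reuses the concurrency count of 10 from main().
# example_poll_until_done is a hypothetical name.
def example_poll_until_done(hosts, worker_func):
  # kick off the parallel run and record what the poller needs
  func_handle = parallelize_repy.parallelize_initfunction(hosts, worker_func, 10)
  thread_communications['func_handle'] = func_handle
  thread_communications['total_hosts'] = len(hosts)

  # threading_currentlyrunning() prints progress and returns True while the
  # parallelized function is still going
  while threading_currentlyrunning():
    time.sleep(5)

  return parallelize_repy.parallelize_getresults(func_handle)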
dns_dict['flagged'] = []

# keeps track of the reversed dns entries
advertise_reversedns = []

# loopbacks, network, eg: 192.*
networkiplist = []

# start multiple threads to reverse lookup all the hostnames/ips as our
# iplist2.list file might have mixed forms (1.1.1.1 <-> bla.foo.bar, and we
# have to match those up)
func_handle = parallelize_repy.parallelize_initfunction(advertised_nodes_list,
    deploy_helper.dnslookup, 15)

while not parallelize_repy.parallelize_isfunctionfinished(func_handle):
  time.sleep(1)

# Function is done
results_dict = parallelize_repy.parallelize_getresults(func_handle)

for each_key in results_dict.keys():
  print results_dict[each_key]

for each_tuple in results_dict['returned']:
  iphostname = each_tuple[0]
  reverse_iphostname = each_tuple[1]
  print str(iphostname)+' resolves to '+reverse_iphostname

  # if the lookup just echoed the address back, treat it as a plain
  # network/loopback IP; otherwise record the mapping in both directions
  if iphostname == reverse_iphostname:
    networkiplist.append(iphostname)
  else:
    dns_dict[iphostname] = reverse_iphostname
    dns_dict[reverse_iphostname] = iphostname
    advertise_reversedns.append(reverse_iphostname)
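# A hedged, hypothetical helper (not part of the original module) showing how
# the dns_dict built above could be consulted afterwards: because both
# directions of each successful lookup were stored, a single dictionary lookup
# translates an IP to its reverse-DNS name or vice versa.
def example_counterpart(dns_dict, ip_or_hostname):
  # return the matching hostname/IP, or None if the node never reverse-resolved
  # (those addresses ended up in networkiplist instead of dns_dict)
  return dns_dict.get(ip_or_hostname)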