def define_nodes(self):
    """ define the node objects for this cluster based on gluster
        pool list output """

    if self.output_mode == 'console' and not cfg.no_progress_msgs:
        # display a progress message
        sys.stdout.write("Processing nodes" + " " * 20 + "\n\r\x1b[A")

    cmd = GlusterCommand('gluster pool list --xml', timeout=cfg.CMD_TIMEOUT)
    cmd.run()
    if cmd.rc != 0:
        print "glusterd did not respond to a peer status request, gstatus"
        print "can not continue.\n"
        exit(12)

    # define the list of elements in the xml that we're interested in
    field_list = ['hostname', 'uuid', 'connected']

    xml_string = ''.join(cmd.stdout)
    xml_root = ETree.fromstring(xml_string)

    peer_list = xml_root.findall('.//peer')

    for peer in peer_list:
        node_info = get_attr(peer, field_list)
        this_hostname = node_info['hostname']

        alias_list = []

        if this_hostname == 'localhost':
            # output may say localhost, but it could be a response from a
            # foreign peer, since the local glusterd could be down
            if GlusterCommand.targetNode == 'localhost':
                # grab all local IPs and their aliases
                local_ip_list = get_ipv4_addr()
                for ip in local_ip_list:
                    alias_list += host_aliases(ip)
                alias_list.append('localhost')
            else:
                this_hostname = GlusterCommand.targetNode
                alias_list = host_aliases(this_hostname)
                alias_list.append('localhost')
        else:
            alias_list = host_aliases(this_hostname)

        # DEBUG ------------------------------------------------------------
        if cfg.debug:
            # drop any empty strings picked up by the alias lookups
            alias_stripped = [alias for alias in alias_list if alias != '']
            print "Creating a node object with uuid %s, with names of %s" % \
                (node_info['uuid'], alias_stripped)
        # ------------------------------------------------------------------

        new_node = Node(node_info['uuid'],
                        node_info['connected'],
                        alias_list)

        self.ip_list += [ip for ip in alias_list if is_ip(ip)]

        # add this node object to the cluster object's node 'dict'
        self.node[node_info['uuid']] = new_node

    self.node_count = Node.node_count()
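# --- illustration ----------------------------------------------------------
# A minimal, self-contained sketch of the XML parsing step performed by
# define_nodes(). The sample document below is an assumed shape for
# 'gluster pool list --xml' output, built from the field_list above rather
# than captured from a real cluster, and extract_fields() is a hypothetical
# stand-in for the get_attr() helper this module uses.
if __name__ == '__main__':
    import xml.etree.ElementTree as ETree

    sample_xml = ("<cliOutput><peerStatus><peer>"
                  "<uuid>aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee</uuid>"
                  "<hostname>localhost</hostname>"
                  "<connected>1</connected>"
                  "</peer></peerStatus></cliOutput>")

    def extract_fields(element, fields):
        # pull each named child element's text into a dict, defaulting
        # to '' when the child is missing
        return dict((f, element.findtext(f, default='')) for f in fields)

    root = ETree.fromstring(sample_xml)
    for peer in root.findall('.//peer'):
        # prints {'connected': '1', 'uuid': 'aaaa...', 'hostname': 'localhost'}
        print extract_fields(peer, ['hostname', 'uuid', 'connected'])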