def main():
    args = parse_arguments()

    LoggingHelper.configure_logging(args.logging_level)

    ironic_client = IronicHelper.get_ironic_client()

    nodes = get_nodes(ironic_client, args.node_type)

    introspect_nodes(args.in_band, ironic_client, nodes)
def validate_node_placement():
    logger.info("Validating node placement...")

    # For each role/flavor, node indices must start at 0 and increase by 1
    ironic = IronicHelper.get_ironic_client()

    flavor_to_indices = {}
    for node in ironic.node.list(detail=True):
        # Skip nodes that are in maintenance mode
        if node.maintenance:
            continue

        # Get the value of the "node" capability
        node_capability = None
        capabilities = node.properties["capabilities"]
        for capability in capabilities.split(","):
            (key, val) = capability.split(":")
            if key == "node":
                node_capability = val

        # If the node capability was not set then error out
        if not node_capability:
            ip, _ = CredentialHelper.get_drac_ip_and_user(node)
            raise ValueError("Error: Node {} has not been assigned a node "
                             "placement index. Run assign_role for this "
                             "node and specify a role with the "
                             "<role>-<index> format".format(ip))

        hyphen = node_capability.rfind("-")
        flavor = node_capability[0:hyphen]
        index = node_capability[hyphen + 1:]

        # Build up a dict that maps a flavor name to a sequence of placement
        # indices
        if flavor not in flavor_to_indices:
            flavor_to_indices[flavor] = []
        flavor_to_indices[flavor].append(int(index))

    # Validate that each sequence starts at zero and is coherent
    error_msg = ''
    for flavor in flavor_to_indices.keys():
        flavor_to_indices[flavor].sort()
        seq = flavor_to_indices[flavor]

        if seq[0] != 0:
            error_msg += "Error: There must be a node with flavor \"{}\" " \
                "that has node placement index 0. Current node placement " \
                "indices are {}\n".format(flavor, str(seq))

        if not is_coherent(seq):
            error_msg += "Error: Nodes that have been assigned the \"{}\" " \
                "flavor do not have node placement indices that increase " \
                "by 1. Current node indices are {}\n".format(flavor, str(seq))

    # If any errors were detected then bail
    if error_msg:
        raise ValueError(error_msg)
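# validate_node_placement() above relies on an is_coherent() helper that is
# not shown in this section. A minimal sketch of what such a helper could look
# like, assuming it only has to confirm that an already-sorted list of
# placement indices increases by exactly 1 between neighbours (the body below
# is an illustration, not the original implementation):
def is_coherent(seq):
    # Every adjacent pair must differ by exactly 1, e.g. [0, 1, 2, 3]
    return all(b - a == 1 for a, b in zip(seq, seq[1:]))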
def main():
    args = parse_arguments()

    LoggingHelper.configure_logging(args.logging_level)

    ironic_client = IronicHelper.get_ironic_client()

    node = IronicHelper.get_ironic_node(ironic_client, args.ip_service_tag)

    introspect_nodes.introspect_nodes(args.in_band, ironic_client, [node])
def main():
    args = parse_arguments()

    LoggingHelper.configure_logging(args.logging_level)

    # Load the nodes into ironic
    logger.info("Importing {} into ironic".format(args.node_definition))
    cmd = ["openstack", "baremetal", "import", "--json",
           args.node_definition]
    exit_code, stdout, stderr = Exec.execute_command(cmd)
    if exit_code != 0:
        logger.error("Failed to import nodes into ironic: {}, {}".format(
            stdout, stderr))
        sys.exit(1)

    # Load the instack file
    try:
        json_file = os.path.expanduser(args.node_definition)
        with open(json_file, 'r') as instackenv_json:
            instackenv = json.load(instackenv_json)
    except (IOError, ValueError):
        logger.exception("Failed to load node definition file {}".format(
            args.node_definition))
        sys.exit(1)

    nodes = instackenv["nodes"]

    # Loop through the nodes
    for node in nodes:
        # Find the node in ironic
        ironic_client = IronicHelper.get_ironic_client()
        ironic_node = IronicHelper.get_ironic_node(ironic_client,
                                                   node["pm_addr"])

        # Set the model and service tag on the node
        logger.info("Setting model ({}), service tag ({}), and provisioning "
                    "MAC ({}) on {}".format(
                        node["model"] if "model" in node else "None",
                        node["service_tag"],
                        node["provisioning_mac"]
                        if "provisioning_mac" in node else "None",
                        node["pm_addr"]))

        patch = [{'op': 'add',
                  'value': node["service_tag"],
                  'path': '/properties/service_tag'}]

        if "model" in node:
            patch.append({'op': 'add',
                          'value': node["model"],
                          'path': '/properties/model'})

        if "provisioning_mac" in node:
            patch.append({'op': 'add',
                          'value': node["provisioning_mac"],
                          'path': '/properties/provisioning_mac'})

        ironic_client.node.update(ironic_node.uuid, patch)
def __init__(self):
    self.data = {'nics': {}, 'cpus': {}}
    self.inspector = None
    self.total_cpus = None
    self.host_cpus = None
    self.pmd_cpus = None
    self.nova_cpus = None
    self.isol_cpus = None
    self.socket_mem = None
    self.get_inspector_client()
    self.ironic = IronicHelper.get_ironic_client()
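# The constructor above calls self.get_inspector_client(), which is not shown
# in this section. A minimal sketch of such a method, assuming
# python-ironic-inspector-client and undercloud credentials obtained through
# CredentialHelper (both assumptions about the surrounding code, not the
# original implementation):
def get_inspector_client(self):
    import ironic_inspector_client
    from keystoneauth1.identity import v3
    from keystoneauth1 import session

    (os_auth_url, os_tenant_name, os_username, os_password,
     os_user_domain_name, os_project_domain_name) = \
        CredentialHelper.get_undercloud_creds()
    auth = v3.Password(auth_url=os_auth_url + "/v3",
                       username=os_username,
                       password=os_password,
                       project_name=os_tenant_name,
                       user_domain_name=os_user_domain_name,
                       project_domain_name=os_project_domain_name)
    # ClientV1 is the published client class in python-ironic-inspector-client
    self.inspector = ironic_inspector_client.ClientV1(
        session=session.Session(auth=auth))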
def main():
    os_auth_url, os_tenant_name, os_username, os_password, \
        os_user_domain_name, os_project_domain_name = \
        CredentialHelper.get_undercloud_creds()
    auth_url = os_auth_url + "/v3"

    ironic = IronicHelper.get_ironic_client()
    nodes = ironic.node.list(detail=True)

    auth = v3.Password(
        auth_url=auth_url,
        username=os_username,
        password=os_password,
        project_name=os_tenant_name,
        user_domain_name=os_user_domain_name,
        project_domain_name=os_project_domain_name
    )
    sess = session.Session(auth=auth)
    nova = novaclient.Client('2', session=sess)

    # Slightly odd syntax for declaring 'banner' reduces the line length
    banner = (
        "+-----------------+---------------------------+-----------------+"
    )
    nodeinfo = "| {:<15} | {:<25} | {:<15} |"

    print(banner)
    print(nodeinfo.format('iDRAC Addr', 'Node Name', 'Provision Addr'))
    print(banner)

    # Display the list ordered by the iDRAC address
    for n in sorted(nodes, key=lambda x: CredentialHelper.get_drac_ip(x)):
        idrac_addr = CredentialHelper.get_drac_ip(n)

        if 'display_name' in n.instance_info:
            node_name = n.instance_info['display_name']
        else:
            node_name = 'None'

        prov_addr = 'None'
        if n.instance_uuid:
            nova_ips = nova.servers.ips(n.instance_uuid)
            if nova_ips and 'ctlplane' in nova_ips:
                prov_addr = nova_ips['ctlplane'][0]['addr']

        print(nodeinfo.format(idrac_addr, node_name, prov_addr))

    print(banner)
def main():
    args = parse_arguments()

    LoggingHelper.configure_logging(args.logging_level)

    ironic_client = IronicHelper.get_ironic_client()

    for node in ironic_client.node.list(detail=True):
        ip, username, password = \
            CredentialHelper.get_drac_creds_from_node(node)

        # Power off the node
        cmd = "ipmitool -H {} -I lanplus -U {} -P '{}' chassis " \
            "power off".format(ip, username, password)
        logger.info("Powering off {}".format(ip))
        logger.debug(" {}".format(cmd))
        os.system(cmd)

        # Set the first boot device to PXE
        cmd = "ipmitool -H {} -I lanplus -U {} -P '{}' chassis " \
            "bootdev pxe options=persistent".format(ip, username, password)
        logger.info(
            "Setting the provisioning NIC to PXE boot on {}".format(ip))
        logger.debug(" {}".format(cmd))
        os.system(cmd)

    if not args.skip:
        os_auth_url, os_tenant_name, os_username, os_password = \
            CredentialHelper.get_undercloud_creds()

        cmd = "openstack baremetal configure boot " \
            "--os-auth-url {} " \
            "--os-project-name {} " \
            "--os-username {} " \
            "--os-password {} " \
            "".format(os_auth_url,
                      os_tenant_name,
                      os_username,
                      os_password)
        logger.info("Assigning the kernel and ramdisk image to all nodes")
        logger.debug(cmd)
        os.system(cmd)
def main():
    args = parse_arguments()

    LoggingHelper.configure_logging(args.logging_level)

    # Strip the downstream-only attributes and load the nodes into ironic
    import_json = os.path.expanduser('~/nodes.json')
    with open(args.node_definition) as node_definition:
        content = json.load(node_definition)
    for node in content['nodes']:
        # Iterate over a copy of the keys so the dict can be mutated safely
        for k in list(node.keys()):
            if k in DOWNSTREAM_ATTRS:
                node.pop(k)
    with open(import_json, 'w') as out:
        json.dump(content, out)

    logger.info("Importing {} into ironic".format(args.node_definition))
    cmd = ["openstack", "overcloud", "node", "import", import_json]
    exit_code, stdout, stderr = Exec.execute_command(cmd)
    if exit_code != 0:
        logger.error("Failed to import nodes into ironic: {}, {}".format(
            stdout, stderr))
        sys.exit(1)

    # Load the instack file
    try:
        json_file = os.path.expanduser(args.node_definition)
        with open(json_file, 'r') as instackenv_json:
            instackenv = json.load(instackenv_json)
    except (IOError, ValueError):
        logger.exception("Failed to load node definition file {}".format(
            args.node_definition))
        sys.exit(1)

    nodes = instackenv["nodes"]

    # Loop through the nodes
    for node in nodes:
        # Find the node in ironic
        ironic_client = IronicHelper.get_ironic_client()
        ironic_node = IronicHelper.get_ironic_node(ironic_client,
                                                   node["pm_addr"])

        # Set the model and service tag on the node
        logger.info("Setting model ({}), service tag ({}), and provisioning "
                    "MAC ({}) on {}".format(
                        node["model"] if "model" in node else "None",
                        node["service_tag"],
                        node["provisioning_mac"]
                        if "provisioning_mac" in node else "None",
                        node["pm_addr"]))

        patch = [{'op': 'add',
                  'value': node["service_tag"],
                  'path': '/properties/service_tag'}]

        if "model" in node:
            patch.append({'op': 'add',
                          'value': node["model"],
                          'path': '/properties/model'})

        if "provisioning_mac" in node:
            patch.append({'op': 'add',
                          'value': node["provisioning_mac"],
                          'path': '/properties/provisioning_mac'})

            if utils.Utils.is_enable_routed_networks():
                logger.info("Adding port with physical address to node: %s",
                            str(ironic_node.uuid))
                kwargs = {'address': node["provisioning_mac"],
                          'physical_network': 'ctlplane',
                          'node_uuid': ironic_node.uuid}
                ironic_client.port.create(**kwargs)

        ironic_client.node.update(ironic_node.uuid, patch)
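# main() above filters DOWNSTREAM_ATTRS out of the node definition before
# handing it to "openstack overcloud node import"; the constant itself is not
# shown in this section. A plausible sketch, assuming the downstream-only keys
# are the same ones the function later writes back as ironic properties (this
# list is an assumption, not the original definition):
DOWNSTREAM_ATTRS = ['model', 'service_tag', 'provisioning_mac']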
def build_node_list(self):
    self.nodes = []

    # Pull in the nodes that nova doesn't know about from our json file
    for server_name in self.network_config["nodes"].keys():
        server = self.network_config["nodes"][server_name]
        node = self.Node(server_name,
                         server["ip"],
                         server["user"],
                         server["networks"])
        self.nodes.append(node)

    # Sort just these by name so the SAH/Director/Dashboard nodes come
    # first
    self.nodes.sort(key=lambda n: n.name)

    os_auth_url, os_tenant_name, os_username, os_password, \
        os_user_domain_name, os_project_domain_name = \
        CredentialHelper.get_undercloud_creds()
    auth_url = os_auth_url + "/v3"
    kwargs = {
        'username': os_username,
        'password': os_password,
        'auth_url': os_auth_url,
        'project_id': os_tenant_name,
        'user_domain_name': os_user_domain_name,
        'project_domain_name': os_project_domain_name
    }
    auth = v3.Password(auth_url=auth_url,
                       username=os_username,
                       password=os_password,
                       project_name=os_tenant_name,
                       user_domain_name=os_user_domain_name,
                       project_domain_name=os_project_domain_name)
    sess = session.Session(auth=auth)
    nova = novaclient.Client('2', session=sess)
    ironic = IronicHelper.get_ironic_client()

    # Build up a map that maps flavor ids to flavor names
    flavor_map = {}
    flavors = nova.flavors.list(detailed=False)
    for flavor in flavors:
        flavor_map[flavor.id] = flavor.name

    logger.debug("flavor_map is:")
    for flavor in flavor_map.keys():
        logger.debug(" " + flavor + " => " + flavor_map[flavor])

    # Get the nodes from nova
    tmp_nodes = []
    nova_servers = nova.servers.list()
    for nova_server in nova_servers:
        flavor_name = None
        if nova_server.flavor["id"]:
            flavor_name = flavor_map[nova_server.flavor["id"]]

        if flavor_name == "baremetal":
            flavor_name = None

        if not flavor_name:
            ironic_server = ironic.node.get_by_instance_uuid(
                nova_server.id)
            capabilities = ironic_server.properties["capabilities"]

            match = re.search(r"node:([a-zA-Z-]+)-\d+", capabilities)
            if match:
                flavor_name = match.group(1)
            else:
                logger.error("Unable to find flavor name for "
                             "node {}".format(nova_server.name))
                sys.exit(1)

        # From the flavor, get the networks
        networks = self.network_config["flavors_to_networks"][flavor_name]

        node = self.Node(nova_server.name,
                         nova_server.networks["ctlplane"][0],
                         "heat-admin",
                         networks)
        tmp_nodes.append(node)

    # Sort the overcloud nodes by name to group the role types together
    tmp_nodes.sort(key=lambda n: n.name)

    self.nodes.extend(tmp_nodes)
def _get_nodes(self):
    os_auth_url, os_tenant_name, os_username, os_password, \
        os_user_domain_name, os_project_domain_name = \
        CredentialHelper.get_undercloud_creds()
    auth_url = os_auth_url + "/v3"

    provisioning_network = NetworkHelper.get_provisioning_network()

    kwargs = {'os_username': os_username,
              'os_password': os_password,
              'os_auth_url': os_auth_url,
              'os_tenant_name': os_tenant_name,
              'os_user_domain_name': os_user_domain_name,
              'os_project_domain_name': os_project_domain_name}
    i_client = IronicHelper.get_ironic_client()

    auth = v3.Password(
        auth_url=auth_url,
        username=os_username,
        password=os_password,
        project_name=os_tenant_name,
        user_domain_name=os_user_domain_name,
        project_domain_name=os_project_domain_name
    )
    sess = session.Session(auth=auth)
    n_client = nova_client.Client(2, session=sess)

    # Build up a dictionary that maps roles to a list of IPs for that role
    self.node_roles_to_nodes = {}

    self.logger.debug("Querying ironic and nova for nodes")
    nodes = i_client.node.list(fields=["uuid", "instance_uuid",
                                       "properties"])
    for node in nodes:
        uuid = node.uuid
        instance_uuid = node.instance_uuid

        # Handle the case where we have a node in ironic that's not in nova
        # (possibly due to the node being in maintenance mode in ironic or
        # the user not assigning a role to a node, etc)
        if instance_uuid is None:
            self.logger.debug("Ironic node " + uuid + " has no "
                              "corresponding instance in nova. Skipping")
            continue

        capabilities = node.properties["capabilities"]
        capabilities = dict(c.split(':') for c in capabilities.split(','))

        # Role is the 'profile' capability when node placement is not
        # in use. Otherwise it's encoded in the 'node' capability.
        if 'profile' in capabilities:
            role = capabilities['profile']
        elif 'node' in capabilities:
            role = capabilities['node']
            # Trim the trailing "-N" where N is the node number
            role = role[:role.rindex('-')]
        else:
            self.logger.error(
                "Failed to determine role of node {}".format(node))
            sys.exit(1)

        server = n_client.servers.get(instance_uuid)
        for address in server.addresses["ctlplane"]:
            ip = address["addr"]
            if IPAddress(ip) in provisioning_network:
                break

        self.logger.debug("Got node:\n"
                          " uuid=" + uuid + "\n"
                          " ip=" + ip + "\n"
                          " role=" + role + "\n"
                          " instance_uuid=" + instance_uuid)

        if role not in self.node_roles_to_nodes:
            self.node_roles_to_nodes[role] = []

        self.node_roles_to_nodes[role].append(ip)

    self.logger.debug("node_roles_to_nodes: " +
                      str(self.node_roles_to_nodes))