def build_network_to_subnet_map(self):
    """Build a map of network name to CIDR.

    Populates ``self.network_to_subnet`` with the standard networks
    resolved through NetworkHelper, then overlays any custom networks
    declared in ``self.network_config["networks"]`` (values are CIDR
    strings converted to IPNetwork objects).
    """
    # Standard networks, each resolved via its NetworkHelper getter.
    self.network_to_subnet = {
        "public_api": NetworkHelper.get_public_api_network(),
        "private_api": NetworkHelper.get_private_api_network(),
        "storage": NetworkHelper.get_storage_network(),
        "storage_clustering": NetworkHelper.get_storage_clustering_network(),
        "management": NetworkHelper.get_management_network(),
        "provisioning": NetworkHelper.get_provisioning_network(),
        "tenant": NetworkHelper.get_tenant_network(),
    }

    # Pull in custom networks from the json
    for network, cidr in self.network_config["networks"].items():
        self.network_to_subnet[network] = IPNetwork(cidr)

    logger.debug("network_to_subnet map is:")
    for network, subnet in self.network_to_subnet.items():
        logger.debug("    " + network + " => " + str(subnet))
def _get_nodes(self):
    """Query ironic and nova and build ``self.node_roles_to_nodes``.

    The result maps each role name (e.g. "controller") to the list of
    provisioning-network IP addresses of the nodes deployed with that
    role.  Ironic nodes with no corresponding nova instance are
    skipped.  Exits the process if a node's role cannot be determined
    or a nova instance has no address on the provisioning network.
    """
    os_auth_url, os_tenant_name, os_username, os_password, \
        os_user_domain_name, os_project_domain_name = \
        CredentialHelper.get_undercloud_creds()
    auth_url = os_auth_url + "v3"

    provisioning_network = NetworkHelper.get_provisioning_network()

    kwargs = {'os_username': os_username,
              'os_password': os_password,
              'os_auth_url': os_auth_url,
              'os_tenant_name': os_tenant_name,
              'os_user_domain_name': os_user_domain_name,
              'os_project_domain_name': os_project_domain_name}
    i_client = ironic_client.get_client(1, **kwargs)

    auth = v3.Password(auth_url=auth_url,
                       username=os_username,
                       password=os_password,
                       project_name=os_tenant_name,
                       user_domain_name=os_user_domain_name,
                       project_domain_name=os_project_domain_name)
    sess = session.Session(auth=auth)
    n_client = nova_client.Client(2, session=sess)

    # Build up a dictionary that maps roles to a list of IPs for that role
    self.node_roles_to_nodes = {}

    self.logger.debug("Querying ironic and nova for nodes")
    nodes = i_client.node.list(
        fields=["uuid", "instance_uuid", "properties"])
    for node in nodes:
        uuid = node.uuid
        instance_uuid = node.instance_uuid

        # Handle the case where we have a node in ironic that's not in nova
        # (possibly due to the node being in maintenance mode in ironic or
        # the user not assigning a role to a node, etc)
        if instance_uuid is None:
            self.logger.debug("Ironic node " + uuid + " has no "
                              "corresponding instance in nova. Skipping")
            continue

        # Capabilities are stored as "key1:val1,key2:val2,..."
        capabilities = node.properties["capabilities"]
        capabilities = dict(c.split(':') for c in capabilities.split(','))

        # Role is the 'profile' capability when node placement is not
        # in use. Otherwise it's encoded in the 'node' capability.
        if 'profile' in capabilities:
            role = capabilities['profile']
        elif 'node' in capabilities:
            role = capabilities['node']
            # Trim the trailing "-N" where N is the node number
            role = role[:role.rindex('-')]
        else:
            self.logger.error(
                "Failed to determine role of node {}".format(node))
            sys.exit(1)

        server = n_client.servers.get(instance_uuid)

        # Pick the ctlplane address that is actually on the provisioning
        # network.  The previous code kept the last address examined even
        # when it was not on the provisioning network, and raised a
        # NameError when the address list was empty.
        ip = None
        for address in server.addresses["ctlplane"]:
            if IPAddress(address["addr"]) in provisioning_network:
                ip = address["addr"]
                break
        if ip is None:
            self.logger.error(
                "Node {} has no address on the provisioning "
                "network".format(uuid))
            sys.exit(1)

        self.logger.debug("Got node:\n"
                          "    uuid=" + uuid + "\n"
                          "    ip=" + ip + "\n"
                          "    role=" + role + "\n"
                          "    instance_uuid=" + instance_uuid)

        if role not in self.node_roles_to_nodes:
            self.node_roles_to_nodes[role] = []

        self.node_roles_to_nodes[role].append(ip)

    self.logger.debug("node_roles_to_nodes: " +
                      str(self.node_roles_to_nodes))
class Node:
    """
    This is essentially a helper class that makes it easy to run
    commands and transfer files on a remote system.

    In addition, it provides a simple framework for storing and updating
    the node's machine ID, which is required for the code that configures
    the Overcloud nodes to work with the Ceph Storage Dashboard.
    """

    # Well-known remote paths used by the dashboard configuration code.
    etc_hosts = "/etc/hosts"
    ansible_hosts = "/etc/ansible/hosts"
    ceph_conf = "/etc/ceph/ceph.conf"
    prometheus_yml = "/var/lib/cephmetrics/prometheus.yml"
    subscription_json = "~/pilot/subscription.json"
    root_home = expanduser("~root")
    heat_admin_home = expanduser("~heat-admin")

    # Resolved once at class-definition time; every node is checked
    # against the same storage network.
    storage_network = NetworkHelper.get_storage_network()

    def __init__(self, address, username, password=None):
        """
        :param address: IP address or hostname used to reach the node
        :param username: user to authenticate as
        :param password: optional password (key-based auth when None)
        """
        self.address = address
        self.username = username
        self.password = password
        self.fqdn = address  # Initial value that will be updated later
        # Set by initialize() once the storage-network address is found.
        # Initializing it here avoids a getattr() probe later.
        self.storage_ip = None

    def _read_machine_id(self):
        # Cache the node's machine ID for later use.
        self.machine_id = self.run("cat /etc/machine-id")

    def initialize(self):
        """Fetch the node's FQDN, machine ID and storage-network IP.

        :raises DashboardException: if the node has no IP address on
            the storage network.
        """
        self.fqdn = self.run("hostname")
        self._read_machine_id()

        # Find the node's IP address on the storage network
        addrs = self.run("sudo ip -4 addr | awk '$1 == \"inet\" {print $2}'")
        for addr in addrs.split("\n"):
            ip_network = IPNetwork(addr)
            if ip_network == Node.storage_network:
                self.storage_ip = str(ip_network.ip)
                break

        if self.storage_ip is None:
            msg = "Node at {} does not have an IP address on the storage" \
                " network".format(self.address)
            LOG.error(msg)
            raise DashboardException(msg)

    def execute(self, command):
        """Run *command* on the node.

        :returns: tuple of (status, stdout, stderr)
        """
        status, stdout, stderr = Ssh.execute_command(self.address,
                                                     command,
                                                     user=self.username,
                                                     password=self.password)

        # For our purposes, any leading or trailing '\n' just gets in the way
        stdout = stdout.strip("\n")
        stderr = stderr.strip("\n")

        LOG.debug("Executed command on {}: \n"
                  "  command : {}\n"
                  "  status  : {}\n"
                  "  stdout  : {}\n"
                  "  stderr  : {}".format(self.fqdn, command, status,
                                          stdout, stderr))
        return status, stdout, stderr

    def run(self, command, check_status=True):
        """Run *command* and return its stdout.

        :raises DashboardException: on non-zero exit status, unless
            check_status is False.
        """
        status, stdout, stderr = self.execute(command)
        if int(status) != 0 and check_status:
            raise DashboardException(
                "Command execution failed on {} ({})".format(self.fqdn,
                                                             self.address))
        return stdout

    def put(self, localfile, remotefile):
        """Copy *localfile* to *remotefile* on the node."""
        LOG.debug("Copying {} to {}@{}:{}".format(localfile,
                                                  self.username,
                                                  self.fqdn,
                                                  remotefile))
        Scp.put_file(self.address,
                     localfile,
                     remotefile,
                     user=self.username,
                     password=self.password)

    def get(self, localfile, remotefile):
        """Copy *remotefile* on the node to *localfile* locally."""
        LOG.debug("Copying {}@{}:{} to {}".format(self.username,
                                                  self.fqdn,
                                                  remotefile,
                                                  localfile))
        Scp.get_file(self.address,
                     localfile,
                     remotefile,
                     user=self.username,
                     password=self.password)
def main():
    """Generate dhcpd.conf from the pilot template, copy it to the SAH
    node, then enable and restart the dhcpd service there.

    Exits the process with status 1 on any template/IO/SSH failure.
    """
    sah_user = "******"

    args = parse_arguments(sah_user)

    LoggingHelper.configure_logging(args.logging_level,
                                    noisy_logger="paramiko")

    sah_password = args.password
    if not sah_password:
        sah_password = getpass("Enter the password for the "
                               "{} user of the SAH node: ".format(sah_user))

    management_net = NetworkHelper.get_management_network()

    dhcp_conf = os.path.join(os.path.expanduser('~'), 'pilot', 'dhcpd.conf')

    LOG.info("Creating dhcp configuration file {}".format(dhcp_conf))
    dhcp_conf_template = os.path.join(os.path.expanduser('~'), 'pilot',
                                      'templates', 'dhcpd.conf')
    try:
        # Context manager guarantees the template file is closed
        # (previously it was opened and never closed).
        with open(dhcp_conf_template, 'r') as in_file:
            file_text = in_file.read()
    except IOError:
        LOG.exception("Could not open dhcp.conf template file {}".format(
            dhcp_conf_template))
        sys.exit(1)

    # Map template tokens to their management-network values.
    token_map = {
        "SUBNET": str(management_net.network),
        "NETMASK": str(management_net.netmask),
        "BROADCAST": str(management_net.broadcast),
        "GATEWAY": NetworkHelper.get_management_network_gateway(),
    }

    for token, value in token_map.items():
        file_text = file_text.replace(token, value)

    # Get the management network pools
    management_net_pools = NetworkHelper.get_management_network_pools()

    # Plug in the management pool ranges
    range_lines = ""
    for pool in management_net_pools:
        range_lines += "    range {} {};\n".format(pool["start"],
                                                   pool["end"])
    file_text = re.sub("[ \t]*range[ \t]+POOL_START[ \t]+POOL_END;\n",
                       range_lines, file_text)

    try:
        with open(dhcp_conf, 'w') as out_file:
            out_file.write(file_text)
    except IOError:
        LOG.exception("Could not open {} for writing.".format(dhcp_conf))
        sys.exit(1)

    # scp dhcp.conf to the SAH
    dest_dhcp_conf = "/etc/dhcp/dhcpd.conf"
    LOG.info("Copying {} to {}@{}:{}".format(dhcp_conf, sah_user,
                                             args.sah_ip, dest_dhcp_conf))
    Scp.put_file(args.sah_ip,
                 dhcp_conf,
                 dest_dhcp_conf,
                 user=sah_user,
                 password=sah_password)

    # The dhcp service will not start without an existing leases file,
    # so touch it to make sure it exists before starting the service
    dhcp_leases = "/var/lib/dhcpd/dhcpd.leases"
    LOG.info("Touching {}:{} as {}".format(args.sah_ip, dhcp_leases,
                                           sah_user))
    exit_code, _, std_err = Ssh.execute_command(
        args.sah_ip,
        "touch " + dhcp_leases,
        user=sah_user,
        password=sah_password)
    if exit_code != 0:
        LOG.error("Unable to touch {}:{}: {}".format(args.sah_ip,
                                                     dhcp_leases,
                                                     std_err))
        sys.exit(1)

    # Enable and restart the dhcp server on the SAH
    LOG.info("Enabling dhcpd on {} as {}".format(args.sah_ip, sah_user))
    exit_code, _, std_err = Ssh.execute_command(
        args.sah_ip,
        "systemctl enable dhcpd",
        user=sah_user,
        password=sah_password)
    if exit_code != 0:
        LOG.error("Unable to enable dhcpd on {}: {}".format(args.sah_ip,
                                                            std_err))
        sys.exit(1)

    LOG.info("Restarting dhcpd on {} as {}".format(args.sah_ip, sah_user))
    exit_code, _, std_err = Ssh.execute_command(
        args.sah_ip,
        "systemctl restart dhcpd",
        user=sah_user,
        password=sah_password)
    if exit_code != 0:
        LOG.error("Unable to restart dhcpd on {}: {}".format(args.sah_ip,
                                                             std_err))
        sys.exit(1)