Пример #1
0
def main():
    """Introspect a single ironic node identified by its service-tag IP."""
    cli_args = parse_arguments()

    LoggingHelper.configure_logging(cli_args.logging_level)

    client = IronicHelper.get_ironic_client()
    target = IronicHelper.get_ironic_node(client, cli_args.ip_service_tag)

    introspect_nodes.introspect_nodes(cli_args.in_band, client, [target])
Пример #2
0
def main():
    """Import the node definition file into ironic, then patch each ironic
    node with its service tag and, when present, its model and
    provisioning MAC.

    Exits with status 1 if the import command fails or the node
    definition file cannot be read/parsed.
    """
    args = parse_arguments()

    LoggingHelper.configure_logging(args.logging_level)

    # Load the nodes into ironic
    logger.info("Importing {} into ironic".format(args.node_definition))
    cmd = ["openstack", "baremetal", "import", "--json", args.node_definition]
    # execute_command returns the process output; the second element was
    # previously misnamed "stdin".
    exit_code, stdout, stderr = Exec.execute_command(cmd)
    if exit_code != 0:
        logger.error("Failed to import nodes into ironic: {}, {}".format(
            stdout, stderr))
        sys.exit(1)

    # Load the instack file
    try:
        json_file = os.path.expanduser(args.node_definition)
        with open(json_file, 'r') as instackenv_json:
            instackenv = json.load(instackenv_json)
    except (IOError, ValueError):
        logger.exception("Failed to load node definition file {}".format(
                         args.node_definition))
        sys.exit(1)

    nodes = instackenv["nodes"]

    # The ironic client is loop-invariant; create it once instead of once
    # per node.
    ironic_client = IronicHelper.get_ironic_client()

    # Loop thru the nodes
    for node in nodes:
        # Find the node in ironic
        ironic_node = IronicHelper.get_ironic_node(ironic_client,
                                                   node["pm_addr"])

        # Set the model and service tag on the node
        logger.info("Setting model ({}), service tag ({}), and provisioning "
                    "MAC ({}) on {}".format(
                        node.get("model", "None"),
                        node["service_tag"],
                        node.get("provisioning_mac", "None"),
                        node["pm_addr"]))
        patch = [{'op': 'add',
                  'value': node["service_tag"],
                  'path': '/properties/service_tag'}]

        if "model" in node:
            patch.append({'op': 'add',
                          'value': node["model"],
                          'path': '/properties/model'})

        if "provisioning_mac" in node:
            patch.append({'op': 'add',
                          'value': node["provisioning_mac"],
                          'path': '/properties/provisioning_mac'})

        ironic_client.node.update(ironic_node.uuid, patch)
Пример #3
0
def main():
    """Introspect every ironic node matching the requested node type."""
    cli_args = parse_arguments()

    LoggingHelper.configure_logging(cli_args.logging_level)

    client = IronicHelper.get_ironic_client()
    selected_nodes = get_nodes(client, cli_args.node_type)
    introspect_nodes(cli_args.in_band, client, selected_nodes)
Пример #4
0
def validate_node_placement():
    """Validate that node placement indices are well formed.

    For each role/flavor, the indices taken from the "node" capability of
    all non-maintenance ironic nodes must start at 0 and increase by 1.

    Raises:
        ValueError: if a node has no placement index, or any flavor's
            index sequence does not start at 0 or is not contiguous.
    """
    logger.info("Validating node placement...")

    # For each role/flavor, node indices must start at 0 and increase by 1
    ironic = IronicHelper.get_ironic_client()

    flavor_to_indices = {}
    for node in ironic.node.list(detail=True):
        # Skip nodes that are in maintenance mode
        if node.maintenance:
            continue

        # Get the value of the "node" capability
        node_capability = None
        capabilities = node.properties["capabilities"]
        for capability in capabilities.split(","):
            # partition tolerates entries without a ":" separator, where
            # split(":") would raise an unpacking error.
            key, _, val = capability.partition(":")
            if key == "node":
                node_capability = val

        # If the node capability was not set then error out
        if not node_capability:
            ip, _ = CredentialHelper.get_drac_ip_and_user(node)

            raise ValueError("Error: Node {} has not been assigned a node "
                             "placement index.  Run assign_role for this "
                             "node and specify a role with the "
                             "<role>-<index> format".format(ip))

        # The capability is "<flavor>-<index>"; split on the last hyphen
        # because the flavor name itself may contain hyphens.
        flavor, _, index = node_capability.rpartition("-")

        # Build up a dict that maps a flavor name to a sequence of placment
        # indices
        flavor_to_indices.setdefault(flavor, []).append(int(index))

    # Validate that the sequence starts at zero and is coherent
    error_msg = ''
    for flavor, seq in flavor_to_indices.items():
        seq.sort()
        if seq[0] != 0:
            error_msg += "Error: There must be a node with flavor \"{}\" " \
                "that has node placement index 0.  Current nodes placement " \
                "indices are {}\n".format(flavor, str(seq))

        if not is_coherent(seq):
            error_msg += "Error: Nodes that have been assigned the \"{}\" " \
                "flavor do not have node placement indices that increase by " \
                "1.  Current node indices are {}\n".format(flavor, str(seq))

    # If any errors were detected then bail
    if error_msg:
        raise ValueError(error_msg)
Пример #5
0
 def __init__(self):
     """Initialize empty NIC/CPU state and create the inspector and
     ironic clients."""
     # Raw data gathered per category; filled in later (e.g. by parse_data).
     self.data = {'nics': {}, 'cpus': {}}
     self.inspector = None
     # CPU partitioning / memory results — all start unset and are
     # presumably populated by the get_*_cpus / get_socket_memory helpers.
     self.total_cpus = None
     self.host_cpus = None
     self.pmd_cpus = None
     self.nova_cpus = None
     self.isol_cpus = None
     self.socket_mem = None
     self.get_inspector_client()
     self.ironic = IronicHelper.get_ironic_client()
Пример #6
0
def main():
    """Print a table mapping each ironic node's iDRAC address to its
    overcloud node name and provisioning ("ctlplane") address."""
    os_auth_url, os_tenant_name, os_username, os_password, \
        os_user_domain_name, os_project_domain_name = \
        CredentialHelper.get_undercloud_creds()
    auth_url = os_auth_url + "/v3"

    ironic = IronicHelper.get_ironic_client()
    nodes = ironic.node.list(detail=True)

    auth = v3.Password(
        auth_url=auth_url,
        username=os_username,
        password=os_password,
        project_name=os_tenant_name,
        user_domain_name=os_user_domain_name,
        project_domain_name=os_project_domain_name
    )

    keystone_session = session.Session(auth=auth)
    nova = novaclient.Client('2', session=keystone_session)

    banner = (
        "+-----------------+---------------------------+-----------------+"
    )
    row_fmt = "| {:<15} | {:<25} | {:<15} |"
    print(banner)
    print(row_fmt.format('iDRAC Addr', 'Node Name', 'Provision Addr'))
    print(banner)
    # Rows are ordered by the iDRAC address
    for node in sorted(nodes, key=CredentialHelper.get_drac_ip):
        idrac_addr = CredentialHelper.get_drac_ip(node)

        inst_info = node.instance_info
        if 'display_name' in inst_info:
            node_name = inst_info['display_name']
        else:
            node_name = 'None'

        prov_addr = 'None'
        if node.instance_uuid:
            addresses = nova.servers.ips(node.instance_uuid)
            if addresses and 'ctlplane' in addresses:
                prov_addr = addresses['ctlplane'][0]['addr']

        print(row_fmt.format(idrac_addr, node_name, prov_addr))
    print(banner)
Пример #7
0
def main():
    """Power off every ironic node and set its first boot device to PXE;
    unless --skip is given, also assign the deploy kernel/ramdisk to all
    nodes."""
    args = parse_arguments()

    LoggingHelper.configure_logging(args.logging_level)

    ironic_client = IronicHelper.get_ironic_client()

    for node in ironic_client.node.list(detail=True):
        drac_ip, drac_user, drac_password = \
            CredentialHelper.get_drac_creds_from_node(node)

        # Power off the node via IPMI.  NOTE(review): the password is
        # passed on the command line and is visible in the process list.
        power_off_cmd = ("ipmitool -H {} -I lanplus -U {} -P '{}' chassis "
                         "power off").format(drac_ip, drac_user,
                                             drac_password)
        logger.info("Powering off {}".format(drac_ip))
        logger.debug("    {}".format(power_off_cmd))
        os.system(power_off_cmd)

        # Make PXE the persistent first boot device
        bootdev_cmd = ("ipmitool -H {} -I lanplus -U {} -P '{}' chassis "
                       "bootdev pxe options=persistent").format(drac_ip,
                                                                drac_user,
                                                                drac_password)
        logger.info(
            "Setting the provisioning NIC to PXE boot on {}".format(drac_ip))
        logger.debug("    {}".format(bootdev_cmd))
        os.system(bootdev_cmd)

    if not args.skip:
        os_auth_url, os_tenant_name, os_username, os_password = \
            CredentialHelper.get_undercloud_creds()

        configure_cmd = ("openstack baremetal configure boot "
                         "--os-auth-url {} "
                         "--os-project-name {} "
                         "--os-username {} "
                         "--os-password {} ").format(os_auth_url,
                                                     os_tenant_name,
                                                     os_username,
                                                     os_password)

        logger.info("Assigning the kernel and ramdisk image to all nodes")
        logger.debug(configure_cmd)
        os.system(configure_cmd)
Пример #8
0
class ConfigOvercloud(object):
    """
    Description: Class responsible for overcloud configurations.
    """
    # NOTE(review): these class attributes are evaluated once at import
    # time and shared by every instance; importing this module therefore
    # requires a reachable ironic service.
    ironic = IronicHelper()
    ironic_client = ironic.get_ironic_client()
    nodes = ironic_client.node.list()
    get_drac_credential = CredentialHelper()

    def __init__(self, overcloud_name):
        """Store the overcloud name and the shell prefix that sources the
        matching <overcloud_name>rc file."""
        self.overcloud_name = overcloud_name
        self.overcloudrc = "source " + home_dir + "/"\
            + self.overcloud_name + "rc;"
        self.nfv_params = NfvParameters()

    def find_ifaces_by_keyword(self, yaml_file, keyword):
        """Return the value part of every "key: value" line in yaml_file
        whose text contains keyword (naive line scan, not a YAML parse)."""
        nics = []
        with open(yaml_file, 'r') as f:
            content = f.readlines()
            for line in content:
                if keyword in line:
                    nics.append(line.split(':')[1].strip())
        return nics

    def edit_environment_files(self,
                               mtu,
                               enable_hugepage,
                               enable_numa,
                               hugepage_size,
                               hostos_cpu_count,
                               ovs_dpdk,
                               sriov,
                               hw_offload,
                               sriov_interfaces,
                               nic_env_file,
                               controller_count,
                               ceph_storage_count,
                               controller_flavor,
                               ceph_storage_flavor,
                               swift_storage_flavor,
                               block_storage_flavor,
                               vlan_range,
                               time_zone,
                               dell_compute_count=0,
                               dell_computehci_count=0,
                               dell_powerflex_count=0):
        """Rewrite the dell-environment (and, for DPDK, neutron-ovs-dpdk)
        templates in place via sed with the supplied node counts, flavors,
        VLAN range, time zone and NFV kernel tuning.

        Raises:
            Exception: if a template file is missing or any sed command
                exits non-zero.
        """
        try:
            logger.info("Editing dell environment file")
            file_path = home_dir + '/pilot/templates/dell-environment.yaml'
            dpdk_file = home_dir + '/pilot/templates/neutron-ovs-dpdk.yaml'
            hw_off_file = home_dir + '/pilot/templates/ovs-hw-offload.yaml'
            cmds = []
            if not os.path.isfile(file_path):
                raise Exception(
                    "The dell-environment.yaml file does not exist")
            if not os.path.isfile(dpdk_file):
                raise Exception(
                    "The neutron-ovs-dpdk.yaml file does not exist")
            # Without DPDK, uncomment the scheduler-filter line
            if not ovs_dpdk:
                cmds.append('sed -i "s|  # NovaSchedulerDefaultFilters|  ' +
                            'NovaSchedulerDefaultFilters|" ' + file_path)
            # Substitute node counts, flavors and the VLAN range
            cmds.extend(
                ('sed -i "s|DellComputeCount:.*|DellComputeCount: ' +
                 str(dell_compute_count) + '|" ' + file_path,
                 'sed -i "s|DellComputeHCICount:.*|DellComputeHCICount: ' +
                 str(dell_computehci_count) + '|" ' + file_path,
                 'sed -i "s|ControllerCount:.*|ControllerCount: ' +
                 str(controller_count) + '|" ' + file_path,
                 'sed -i "s|CephStorageCount:.*|CephStorageCount: ' +
                 str(ceph_storage_count) + '|" ' + file_path,
                 'sed -i "s|PowerflexStorageCount:.*|PowerflexStorageCount: ' +
                 str(dell_powerflex_count) + '|" ' + file_path,
                 'sed -i "s|OvercloudControllerFlavor:.*' +
                 '|OvercloudControllerFlavor: ' + str(controller_flavor) +
                 '|" ' + file_path, 'sed -i "s|OvercloudCephStorageFlavor:.*' +
                 '|OvercloudCephStorageFlavor: ' + str(ceph_storage_flavor) +
                 '|" ' + file_path,
                 'sed -i "s|OvercloudSwiftStorageFlavor:.*' +
                 '|OvercloudSwiftStorageFlavor: ' + str(swift_storage_flavor) +
                 '|" ' + file_path,
                 'sed -i "s|OvercloudBlockStorageFlavor:.*' +
                 '|OvercloudBlockStorageFlavor: ' + str(block_storage_flavor) +
                 '|" ' + file_path, 'sed -i "s|NeutronNetworkVLANRanges:.*' +
                 '|NeutronNetworkVLANRanges: ' + 'physint:' + str(vlan_range) +
                 ',physext'
                 '|" ' + file_path))
            kernel_args = ''
            # IOMMU is required for SR-IOV / OVS-DPDK device passthrough
            if sriov or ovs_dpdk:
                kernel_args = "iommu=pt intel_iommu=on"

            if enable_hugepage:
                hpg_num = self.nfv_params.calculate_hugepage_count(
                    hugepage_size)
                # hugepage_size[0:-1] drops the unit suffix, e.g. "1G" -> "1"
                kernel_args += " default_hugepagesz=%s hugepagesz=%s" \
                    " hugepages=%s" \
                    % (hugepage_size, hugepage_size[0:-1], str(hpg_num))

            if enable_numa:
                # Derive the CPU partitioning from a representative
                # compute node's introspection data.
                node_uuid, node_data = self.nfv_params.select_compute_node()
                self.nfv_params.parse_data(node_data)
                self.nfv_params.get_all_cpus()
                self.nfv_params.get_host_cpus(hostos_cpu_count)
                if ovs_dpdk:
                    dpdk_nics = self.find_ifaces_by_keyword(
                        nic_env_file, 'Dpdk')
                    logger.debug("DPDK-NICs >>" + str(dpdk_nics))
                    self.nfv_params.get_pmd_cpus(mtu, dpdk_nics)
                    self.nfv_params.get_socket_memory(mtu, dpdk_nics)
                self.nfv_params.get_nova_cpus()
                self.nfv_params.get_isol_cpus()
                kernel_args += " isolcpus=%s" % self.nfv_params.isol_cpus
                cmds.append(
                    'sed -i "s|# NovaComputeCpuDedicatedSet:.*|NovaComputeCpuDedicatedSet: '
                    + self.nfv_params.nova_cpus + '|" ' + file_path)
            if kernel_args:
                cmds.append('sed -i "s|# DellComputeParameters:' +
                            '|DellComputeParameters:|" ' + file_path)
                cmds.append('sed -i "s|# KernelArgs:.*|KernelArgs: \\"' +
                            kernel_args + '\\" |" ' + file_path)
            if ovs_dpdk:
                cmds.append(
                    'sed -i "s|OvsDpdkCoreList:.*|OvsDpdkCoreList: \\"' +
                    self.nfv_params.host_cpus + '\\" |" ' + dpdk_file)
                cmds.append(
                    'sed -i "s|NovaComputeCpuSharedSet:.*|NovaComputeCpuSharedSet: \\"'
                    + self.nfv_params.host_cpus + '\\" |" ' + dpdk_file)
                cmds.append('sed -i "s|OvsPmdCoreList:.*|OvsPmdCoreList: \\"' +
                            self.nfv_params.pmd_cpus + '\\" |" ' + dpdk_file)
                cmds.append('sed -i "s|OvsDpdkSocketMemory:' +
                            '.*|OvsDpdkSocketMemory: \\"' +
                            self.nfv_params.socket_mem + '\\" |" ' + dpdk_file)
                cmds.append('sed -i "s|IsolCpusList:.*|IsolCpusList: \\"' +
                            self.nfv_params.isol_cpus + '\\" |" ' + dpdk_file)

            # Powerflex storage uses a cinder-backed glance instead of RBD
            if dell_powerflex_count > 0:
                cmds.append('sed -i "s|NovaEnableRbdBackend:.*' +
                            '|NovaEnableRbdBackend: false |" ' + file_path)
                cmds.append('sed -i "s|CinderEnableRbdBackend:.*' +
                            '|CinderEnableRbdBackend: false |" ' + file_path)
                cmds.append('sed -i "s|GlanceBackend:.*' +
                            '|GlanceBackend: cinder|" ' + file_path)

            cmds.append('sed -i "s|TimeZone:.*' + '|TimeZone: \\"' +
                        time_zone + '\\" |" ' + file_path)

            # Apply every accumulated sed edit, failing fast on error
            for cmd in cmds:
                status = os.system(cmd)
                if status != 0:
                    raise Exception("Failed to execute the command {}"
                                    " with error code {}".format(cmd, status))
                logger.debug("cmd: {}".format(cmd))

        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            logger.error(message)
            raise Exception("Failed to modify the dell_environment.yaml"
                            " at location {}".format(file_path))

    def get_dell_compute_nodes_hostnames(self, nova):
        """Return the names of nova servers whose name contains
        "dell-compute".

        Raises:
            Exception: if listing the servers fails.
        """
        try:
            logger.info("Getting dellnfv compute node hostnames")

            # Get list of dell nfv nodes
            dell_hosts = []

            for host in nova.servers.list():
                if "dell-compute" in host.name:
                    hostname = str(host.name)
                    dell_hosts.append(hostname)

            return dell_hosts
        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            logger.error(message)
            raise Exception("Failed to get the Dell Compute nodes.")
Пример #9
0
class ConfigEdge(ConfigOvercloud):
    """
    Description: Class responsible for overcloud configurations.
    """
    # NOTE(review): these shadow the identically-named attributes on
    # ConfigOvercloud and are evaluated once at import time.
    ironic = IronicHelper()
    ironic_client = ironic.get_ironic_client()
    nodes = ironic_client.node.list()
    get_drac_credential = CredentialHelper()

    def __init__(self, overcloud_name, node_type, node_type_data):
        """Derive the per-node-type nic-environment and instackenv file
        paths from node_type, then delegate to ConfigOvercloud."""
        self.node_type = node_type
        # node_type_data arrives serialized as a JSON string
        self.node_type_data = json.loads(node_type_data)
        self.mtu = int(self.node_type_data["nfv_mtu"])
        # e.g. "Edge Site-1" -> directory "edge_site_1", suffix "edgesite1"
        _dir = (re.sub(r'[^a-z0-9]', " ", node_type.lower()).replace(" ", "_"))
        _ntl = re.sub(r'[^a-z0-9]', "", node_type.lower())
        ne_name = "nic_environment_{}.yaml".format(_ntl)
        instack_name = "instackenv_{}.json".format(_ntl)
        nic_env_file = os.path.join(home_dir, _dir, ne_name)
        instackenv_file = os.path.join(home_dir, _dir, instack_name)
        self.instackenv = instackenv_file
        self.nic_env = nic_env_file
        super().__init__(overcloud_name)

    def fetch_nfv_parameters(self):
        """Compute the NFV heat parameters for this node type.

        Returns:
            dict: {"dell_env": {...}} with kernel args and CPU
            partitioning, plus {"dpdk": {...}} when DPDK is enabled.
        """
        logger.debug("Retrieving NFV parameters")
        ntd = self.node_type_data
        enable_hugepage = Utils.string_to_bool(ntd["hpg_enable"])
        enable_numa = Utils.string_to_bool(ntd["numa_enable"])
        nfv_type = self._get_nfv_type(ntd)
        is_ovs_dpdk = bool(nfv_type and nfv_type in ["dpdk", "both"])
        hostos_cpu_count = int(ntd["numa_hostos_cpu_count"])
        # Same directory/suffix derivation as in __init__
        _dir = (re.sub(r'[^a-z0-9]', " ",
                       self.node_type.lower()).replace(" ", "_"))
        ntl = re.sub(r'[^a-z0-9]', "", self.node_type.lower())
        _f_name = "nic_environment_{}.yaml".format(ntl)
        nic_env_file = os.path.join(home_dir, _dir, _f_name)
        params = {}
        params_dell_env = params["dell_env"] = {}
        kernel_args = "iommu=pt intel_iommu=on"

        if enable_hugepage:
            hpg_num = self.nfv_params.calculate_hugepage_count(ntd["hpg_size"])
            # ntd["hpg_size"][0:-1] drops the unit suffix, e.g. "1G" -> "1"
            kernel_args += (" default_hugepagesz={} hugepagesz={}"
                            " hugepages={}").format(ntd["hpg_size"],
                                                    ntd["hpg_size"][0:-1],
                                                    str(hpg_num))
        if enable_numa:
            # CPU layout is taken from a representative compute node
            _, node_data = self.nfv_params.select_compute_node(
                self.node_type, self.instackenv)
            self.nfv_params.parse_data(node_data)
            self.nfv_params.get_all_cpus()
            self.nfv_params.get_host_cpus(hostos_cpu_count)
            self.nfv_params.get_nova_cpus()
            self.nfv_params.get_isol_cpus()
            if is_ovs_dpdk:
                dpdk_nics = self.find_ifaces_by_keyword(nic_env_file, 'Dpdk')
                logger.debug("DPDK-NICs >>" + str(dpdk_nics))
                self.nfv_params.get_pmd_cpus(self.mtu, dpdk_nics)
                self.nfv_params.get_socket_memory(self.mtu, dpdk_nics)
            kernel_args += " isolcpus={}".format(self.nfv_params.isol_cpus)
            # dell-environmment role specific cpu parameters
            params_dell_env["IsolCpusList"] = self.nfv_params.isol_cpus
            params_dell_env[
                "NovaComputeCpuDedicatedSet"] = self.nfv_params.nova_cpus
        if is_ovs_dpdk:
            params_dpdk = params["dpdk"] = {}
            params_dpdk["OvsDpdkCoreList"] = self.nfv_params.host_cpus
            params_dpdk["OvsPmdCoreList"] = self.nfv_params.pmd_cpus
            params_dpdk["OvsDpdkSocketMemory"] = self.nfv_params.socket_mem
            # params_dpdk["IsolCpusList"] = self.nfv_params.isol_cpus # Populated in dell_env file
            # params_dpdk["NovaComputeCpuDedicatedSet"] = self.nfv_params.nova_cpus # Populated in dell_env file
            # params_dpdk["NovaComputeCpuSharedSet"] = self.nfv_params.shared_cpus # Not used in current Architecture

        params_dell_env["KernelArgs"] = kernel_args
        return params

    def _get_nfv_type(self, node_type_data):
        """Return the stripped "nfv_type" value when it is one of
        "dpdk", "sriov" or "both"; otherwise None."""
        if ("nfv_type" in node_type_data
                and len(node_type_data["nfv_type"].strip()) != 0
                and node_type_data["nfv_type"].strip()
                in ("dpdk", "sriov", "both")):
            return node_type_data["nfv_type"].strip()
        return None
Пример #10
0
class ConfigOvercloud(object):
    """
    Description: Class responsible for overcloud configurations.
    """
    # NOTE(review): these class attributes are evaluated once at import
    # time and shared by every instance; importing this module therefore
    # requires a reachable ironic service.
    ironic = IronicHelper()
    ironic_client = ironic.get_ironic_client()
    nodes = ironic_client.node.list()
    get_drac_credential = CredentialHelper()

    def __init__(self, overcloud_name):
        """Store the overcloud name and the shell prefix that sources the
        matching <overcloud_name>rc file."""
        self.overcloud_name = overcloud_name
        self.overcloudrc = "source " + home_dir + "/"\
            + self.overcloud_name + "rc;"
        self.nfv_params = NfvParameters()

    def find_ifaces_by_keyword(self, yaml_file, keyword):
        """Return the value part of every "key: value" line in yaml_file
        whose text contains keyword (naive line scan, not a YAML parse)."""
        nics = []
        with open(yaml_file, 'r') as f:
            content = f.readlines()
            for line in content:
                if keyword in line:
                    nics.append(line.split(':')[1].strip())
        return nics

    def edit_environment_files(self,
                               mtu,
                               enable_hugepage,
                               enable_numa,
                               hugepage_size,
                               hostos_cpu_count,
                               ovs_dpdk,
                               sriov,
                               nic_env_file,
                               mariadb_max_connections,
                               innodb_buffer_pool_size,
                               innodb_buffer_pool_instances,
                               controller_count,
                               ceph_storage_count,
                               controller_flavor,
                               ceph_storage_flavor,
                               swift_storage_flavor,
                               block_storage_flavor,
                               vlan_range,
                               dell_compute_count=0):
        """Rewrite the dell-environment (and, for DPDK, neutron-ovs-dpdk)
        templates in place via sed with the supplied node counts, flavors,
        VLAN range, NFV kernel tuning and MariaDB sizing.

        Raises:
            Exception: if a template file is missing, the requested
                buffer pool exceeds available controller memory, or any
                sed command exits non-zero.
        """
        try:
            logger.info("Editing dell environment file")
            file_path = home_dir + '/pilot/templates/dell-environment.yaml'
            dpdk_file = home_dir + '/pilot/templates/neutron-ovs-dpdk.yaml'
            cmds = []
            if not os.path.isfile(file_path):
                raise Exception(
                    "The dell-environment.yaml file does not exist")
            if not os.path.isfile(dpdk_file):
                raise Exception(
                    "The neutron-ovs-dpdk.yaml file does not exist")
            # Without DPDK, uncomment the scheduler-filter line
            if not ovs_dpdk:
                cmds.append('sed -i "s|  # NovaSchedulerDefaultFilters|  ' +
                            'NovaSchedulerDefaultFilters|" ' + file_path)
            # Substitute node counts, flavors and the VLAN range
            cmds.extend(
                ('sed -i "s|DellComputeCount:.*|DellComputeCount: ' +
                 str(dell_compute_count) + '|" ' + file_path,
                 'sed -i "s|ControllerCount:.*|ControllerCount: ' +
                 str(controller_count) + '|" ' + file_path,
                 'sed -i "s|CephStorageCount:.*|CephStorageCount: ' +
                 str(ceph_storage_count) + '|" ' + file_path,
                 'sed -i "s|OvercloudControllerFlavor:.*' +
                 '|OvercloudControllerFlavor: ' + str(controller_flavor) +
                 '|" ' + file_path, 'sed -i "s|OvercloudCephStorageFlavor:.*' +
                 '|OvercloudCephStorageFlavor: ' + str(ceph_storage_flavor) +
                 '|" ' + file_path,
                 'sed -i "s|OvercloudSwiftStorageFlavor:.*' +
                 '|OvercloudSwiftStorageFlavor: ' + str(swift_storage_flavor) +
                 '|" ' + file_path,
                 'sed -i "s|OvercloudBlockStorageFlavor:.*' +
                 '|OvercloudBlockStorageFlavor: ' + str(block_storage_flavor) +
                 '|" ' + file_path, 'sed -i "s|NeutronNetworkVLANRanges:.*' +
                 '|NeutronNetworkVLANRanges: ' + 'physint:' + str(vlan_range) +
                 ',physext'
                 '|" ' + file_path))
            kernel_args = ''
            # IOMMU is required for SR-IOV / OVS-DPDK device passthrough
            if sriov or ovs_dpdk:
                kernel_args = "iommu=pt intel_iommu=on"
            if enable_hugepage:
                hpg_num = self.nfv_params.calculate_hugepage_count(
                    hugepage_size)
                # hugepage_size[0:-1] drops the unit suffix, e.g. "1G" -> "1"
                kernel_args += " default_hugepagesz=%s hugepagesz=%s" \
                    " hugepages=%s" \
                    % (hugepage_size, hugepage_size[0:-1], str(hpg_num))

            if enable_numa:
                # Derive the CPU partitioning from a representative
                # compute node's introspection data.
                node_uuid, node_data = self.nfv_params.select_compute_node()
                self.nfv_params.parse_data(node_data)
                self.nfv_params.get_all_cpus()
                self.nfv_params.get_host_cpus(hostos_cpu_count)
                if ovs_dpdk:
                    dpdk_nics = self.find_ifaces_by_keyword(
                        nic_env_file, 'Dpdk')
                    self.nfv_params.get_pmd_cpus(mtu, dpdk_nics)
                    self.nfv_params.get_socket_memory(mtu, dpdk_nics)
                self.nfv_params.get_nova_cpus()
                self.nfv_params.get_isol_cpus()
                # NOTE(review): isolcpus is built from nova_cpus here even
                # though isol_cpus is computed above (and a newer variant
                # of this method uses isol_cpus) — confirm this is intended.
                kernel_args += " isolcpus=%s" % self.nfv_params.nova_cpus
                cmds.append('sed -i "s|# NovaVcpuPinSet:.*|NovaVcpuPinSet: ' +
                            self.nfv_params.nova_cpus + '|" ' + file_path)
            cmds.append('sed -i "s|# DellComputeParameters:' +
                        '|DellComputeParameters:|" ' + file_path)
            if kernel_args:
                cmds.append('sed -i "s|# KernelArgs:.*|KernelArgs: \\"' +
                            kernel_args + '\\" |" ' + file_path)
            if ovs_dpdk:
                cmds.append(
                    'sed -i "s|OvsDpdkCoreList:.*|OvsDpdkCoreList: \\"' +
                    self.nfv_params.host_cpus + '\\" |" ' + dpdk_file)
                cmds.append('sed -i "s|OvsPmdCoreList:.*|OvsPmdCoreList: \\"' +
                            self.nfv_params.pmd_cpus + '\\" |" ' + dpdk_file)
                cmds.append('sed -i "s|OvsDpdkSocketMemory:' +
                            '.*|OvsDpdkSocketMemory: \\"' +
                            self.nfv_params.socket_mem + '\\" |" ' + dpdk_file)
                cmds.append('sed -i "s|# IsolCpusList:.*|IsolCpusList: ' +
                            self.nfv_params.isol_cpus + '|" ' + dpdk_file)

            # Performance and Optimization
            if innodb_buffer_pool_size != "dynamic":
                # e.g. "16G" -> 16384 MB, compared against the smallest
                # controller node's memory.
                BufferPoolSize = int(innodb_buffer_pool_size.replace(
                    "G", "")) * 1024
                memory_mb = self.nfv_params.get_minimum_memory_size("control")
                if memory_mb < BufferPoolSize:
                    raise Exception("innodb_buffer_pool_size is greater than"
                                    " available memory size")
            cmds.append(
                'sed -i "s|MysqlMaxConnections.*|MysqlMaxConnections: ' +
                mariadb_max_connections + '|" ' + file_path)
            if ovs_dpdk:
                f_path = dpdk_file
            else:
                f_path = file_path
            cmds.append('sed -i "s|BufferPoolSize.*|BufferPoolSize: ' +
                        innodb_buffer_pool_size + '|" ' + f_path)
            cmds.append(
                'sed -i "s|BufferPoolInstances.*|BufferPoolInstances: ' +
                innodb_buffer_pool_instances + '|" ' + f_path)

            # Apply every accumulated sed edit, failing fast on error
            for cmd in cmds:
                status = os.system(cmd)
                if status != 0:
                    raise Exception("Failed to execute the command {}"
                                    " with error code {}".format(cmd, status))
                logger.debug("cmd: {}".format(cmd))

        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            logger.error(message)
            raise Exception("Failed to modify the dell_environment.yaml"
                            " at location {}".format(file_path))

    def get_dell_compute_nodes_hostnames(self, nova):
        """Return the names of nova servers whose name contains
        "dell-compute".

        Raises:
            Exception: if listing the servers fails.
        """
        try:
            logger.info("Getting dellnfv compute node hostnames")

            # Get list of dell nfv nodes
            dell_hosts = []

            for host in nova.servers.list():
                if "dell-compute" in host.name:
                    hostname = str(host.name)
                    dell_hosts.append(hostname)

            return dell_hosts
        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            logger.error(message)
            raise Exception("Failed to get the Dell Compute nodes.")
Пример #11
0
def main():
    """Strip downstream-only attributes from the node definition file,
    import it into ironic, then patch each ironic node with its service
    tag and, when present, its model and provisioning MAC (creating a
    ctlplane port when routed networks are enabled).

    Exits with status 1 if the import command fails or the node
    definition file cannot be read/parsed.
    """
    args = parse_arguments()

    LoggingHelper.configure_logging(args.logging_level)

    # Load the nodes into ironic
    import_json = os.path.expanduser('~/nodes.json')
    # Use a context manager so the file handle is always closed
    with open(args.node_definition) as node_definition_file:
        content = json.load(node_definition_file)
    for node in content['nodes']:
        # Iterate over a snapshot of the keys: popping from a dict while
        # iterating its live key view raises RuntimeError in Python 3.
        for k in list(node):
            if k in DOWNSTREAM_ATTRS:
                node.pop(k)
    with open(import_json, 'w') as out:
        json.dump(content, out)
    logger.info("Importing {} into ironic".format(args.node_definition))
    cmd = ["openstack", "overcloud", "node", "import", import_json]
    # execute_command returns the process output; the second element was
    # previously misnamed "stdin".
    exit_code, stdout, stderr = Exec.execute_command(cmd)
    if exit_code != 0:
        logger.error("Failed to import nodes into ironic: {}, {}".format(
            stdout, stderr))
        sys.exit(1)

    # Load the instack file
    try:
        json_file = os.path.expanduser(args.node_definition)
        with open(json_file, 'r') as instackenv_json:
            instackenv = json.load(instackenv_json)
    except (IOError, ValueError):
        logger.exception("Failed to load node definition file {}".format(
                         args.node_definition))
        sys.exit(1)

    nodes = instackenv["nodes"]

    # The ironic client is loop-invariant; create it once instead of once
    # per node.
    ironic_client = IronicHelper.get_ironic_client()

    # Loop thru the nodes
    for node in nodes:
        # Find the node in ironic
        ironic_node = IronicHelper.get_ironic_node(ironic_client,
                                                   node["pm_addr"])

        # Set the model and service tag on the node
        logger.info("Setting model ({}), service tag ({}), and provisioning "
                    "MAC ({}) on {}".format(
                        node.get("model", "None"),
                        node["service_tag"],
                        node.get("provisioning_mac", "None"),
                        node["pm_addr"]))
        patch = [{'op': 'add',
                  'value': node["service_tag"],
                  'path': '/properties/service_tag'}]

        if "model" in node:
            patch.append({'op': 'add',
                          'value': node["model"],
                          'path': '/properties/model'})

        if "provisioning_mac" in node:
            patch.append({'op': 'add',
                          'value': node["provisioning_mac"],
                          'path': '/properties/provisioning_mac'})
            if utils.Utils.is_enable_routed_networks():
                logger.info("Adding port with physical address to node: %s",
                            str(ironic_node.uuid))
                kwargs = {'address': node["provisioning_mac"],
                          'physical_network': 'ctlplane',
                          'node_uuid': ironic_node.uuid}
                ironic_client.port.create(**kwargs)

        ironic_client.node.update(ironic_node.uuid, patch)
Пример #12
0
class ConfigOvercloud(object):
    """
    Description: Class responsible for overcloud configurations.
    """
    # Shared state fetched once at class-definition time: an ironic client
    # and the full node inventory, reused by every classmethod below.
    ironic = IronicHelper()
    ironic_client = ironic.get_ironic_client()
    nodes = ironic_client.node.list()
    get_drac_credential = CredentialHelper()

    def __init__(self, overcloud_name):
        """Remember the overcloud name and build the shell prefix that
        sources its rc file so later commands target that overcloud."""
        self.overcloud_name = overcloud_name
        self.overcloudrc = "source " + home_dir + "/"\
            + self.overcloud_name + "rc;"

    @classmethod
    def get_minimum_memory_size(cls, node_type):
        """Return the smallest memory_mb among nodes of the given type.

        node_type is matched as a substring against the value of the first
        "key:value" entry in each ironic node's 'capabilities' property
        (e.g. "compute" or "control").

        Raises Exception if ironic lookups fail or no node matches
        (min() of an empty list).
        """
        try:
            memory_size = []
            for node in ConfigOvercloud.nodes:
                node_uuid = node.uuid
                # Fetch the full node record; the cached list may not carry
                # every property.
                node_details = ConfigOvercloud.ironic_client.node.get(
                    node_uuid)
                # Get the memory count or size
                memory_count = node_details.properties['memory_mb']
                # The node type is encoded in the first capability entry.
                node_properties_capabilities = node_details.properties[
                    'capabilities'].split(',')[0].split(':')[1]
                if node_type in node_properties_capabilities:
                    memory_size.append(memory_count)
            return min(memory_size)
        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            raise Exception("Failed to get memory size {}".format(message))

    @classmethod
    def calculate_hostos_cpus(cls, number_of_host_os_cpu):
        """Compute host-OS / vCPU pinning data for the compute nodes.

        Queries each compute node's iDRAC for CPU socket information,
        requires hyperthreading on every socket, validates the minimum
        vCPU count against the reference architecture, then publishes the
        results through the module globals HOST_OS_CPUS, VCPUS and
        TOTAL_CPUS.

        Raises Exception on any failure (wrapped with context).
        """
        try:
            global HOST_OS_CPUS, VCPUS, TOTAL_CPUS
            cpu_count_list = []
            for node in ConfigOvercloud.nodes:
                # For every compute node get the corresponding DRAC
                # credentials to fetch the CPU details.
                node_uuid = node.uuid
                node_details = ConfigOvercloud.ironic_client.node.get(
                    node_uuid)
                node_type = node_details.properties['capabilities'].split(
                    ',')[0].split(':')[1]
                if 'compute' not in node_type:
                    # Only compute nodes participate in CPU pinning.
                    continue
                drac_ip, drac_user, drac_password = \
                    ConfigOvercloud.get_drac_credential.get_drac_creds(
                        ConfigOvercloud.ironic_client, node_uuid)
                stor = client.DRACClient(drac_ip, drac_user, drac_password)
                # CPU socket information for every compute node.
                sockets = stor.list_cpus()
                cpu_count = 0
                for socket in sockets:
                    if socket.ht_enabled:
                        cpu_count += socket.cores * 2
                    else:
                        raise Exception("Hyperthreading is not enabled in " +
                                        str(node_uuid))
                cpu_count_list.append(cpu_count)

            min_cpu_count = min(cpu_count_list)
            if min_cpu_count not in [40, 48, 56, 64, 72, 128]:
                raise Exception("The number of vCPUs, as specified in the"
                                " reference architecture, must be one of"
                                " [40, 48, 56, 64, 72, 128]"
                                " but number of vCPUs are " +
                                str(min_cpu_count))
            number_of_host_os_cpu = int(number_of_host_os_cpu)
            # Look up the sibling table once instead of three times.
            siblings_info = cpu_siblings.sibling_info[min_cpu_count][
                number_of_host_os_cpu]
            logger.info("host_os_cpus {}".format(
                siblings_info["host_os_cpu"]))
            logger.info(
                "vcpus {}".format(siblings_info["vcpu_pin_set"]))
            HOST_OS_CPUS = siblings_info["host_os_cpu"]
            VCPUS = siblings_info["vcpu_pin_set"]
            TOTAL_CPUS = min_cpu_count
        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            raise Exception("Failed to calculate "
                            "Numa Vcpu list {}".format(message))

    @classmethod
    def calculate_hugepage_count(cls, hugepage_size):
        """Return the number of hugepages to configure on compute nodes.

        hugepage_size must be "2MB" or "1GB".  The count is derived from
        the smallest compute-node memory size minus 16384 MB reserved for
        the host OS (12 GB) and kernel (4 GB).

        Raises Exception if RAM is below 128 GB or the size is unsupported.
        """
        try:
            memory_count = ConfigOvercloud.get_minimum_memory_size("compute")
            # RAM size should be more than 128G
            if memory_count < 128000:
                raise Exception("RAM size is less than 128GB"
                                "make sure to have all prerequisites")
            # Subtracting
            # 16384MB = (Host Memory 12GB + Kernel Memory 4GB)
            memory_count = (memory_count - 16384)
            # Floor division keeps the count an integer on both Python 2
            # and 3 -- the value ends up on a kernel command line.
            if hugepage_size == "2MB":
                hugepage_count = memory_count // 2
            elif hugepage_size == "1GB":
                hugepage_count = memory_count // 1024
            else:
                # Previously an unsupported size crashed later with an
                # unbound-variable NameError; fail explicitly instead.
                raise Exception("Unsupported hugepage size {}".format(
                    hugepage_size))
            logger.info("hugepage_size {}".format(hugepage_size))
            logger.info("hugepage_count {}".format(hugepage_count))
            return hugepage_count
        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            raise Exception("Failed to calculate"
                            " hugepage count {}".format(message))

    def edit_environment_files(self,
                               enable_hugepage,
                               enable_numa,
                               hugepage_size,
                               hostos_cpu_count,
                               ovs_dpdk,
                               nic_env_file,
                               mariadb_max_connections,
                               innodb_buffer_pool_size,
                               innodb_buffer_pool_instances,
                               controller_count,
                               ceph_storage_count,
                               controller_flavor,
                               ceph_storage_flavor,
                               swift_storage_flavor,
                               block_storage_flavor,
                               vlan_range,
                               dell_compute_count=0):
        """Apply the deployment parameters to the dell-environment.yaml
        and neutron-ovs-dpdk.yaml templates via in-place sed edits.

        Builds a list of sed commands reflecting node counts, flavors,
        VLAN range, hugepage/NUMA/DPDK tuning and MariaDB settings, then
        executes them with os.system, failing on the first non-zero exit.

        Raises Exception if a template is missing, a precondition fails,
        or any sed command returns a non-zero status.
        """
        # Resolve the template paths up front so the error handler below
        # can always reference file_path.
        file_path = home_dir + '/pilot/templates/dell-environment.yaml'
        dpdk_file = home_dir + '/pilot/templates/neutron-ovs-dpdk.yaml'
        try:
            logger.info("Editing dell environment file")
            cmds = []
            if not os.path.isfile(file_path):
                raise Exception(
                    "The dell-environment.yaml file does not exist")
            if not os.path.isfile(dpdk_file):
                raise Exception(
                    "The neutron-ovs-dpdk.yaml file does not exist")
            if ovs_dpdk and not enable_hugepage:
                # The DPDK template reuses the hugepage kernel arguments;
                # without hugepages this used to crash with an unbound
                # "hugecmd" NameError.
                raise Exception("OVS-DPDK requires hugepages to be enabled")
            if not ovs_dpdk:
                cmds.append('sed -i "s|  # NovaSchedulerDefaultFilters|  ' +
                            'NovaSchedulerDefaultFilters|" ' + file_path)
            cmds.extend((
                'sed -i "s|DellComputeCount:.*|DellComputeCount: ' +
                str(dell_compute_count) + '|" ' + file_path,
                'sed -i "s|ControllerCount:.*|ControllerCount: ' +
                str(controller_count) + '|" ' + file_path,
                'sed -i "s|CephStorageCount:.*|CephStorageCount: ' +
                str(ceph_storage_count) + '|" ' + file_path,
                'sed -i "s|OvercloudControllerFlavor:.*|OvercloudControllerFlavor: '
                + str(controller_flavor) + '|" ' + file_path,
                'sed -i "s|OvercloudCephStorageFlavor:.*|OvercloudCephStorageFlavor: '
                + str(ceph_storage_flavor) + '|" ' + file_path,
                'sed -i "s|OvercloudSwiftStorageFlavor:.*|OvercloudSwiftStorageFlavor: '
                + str(swift_storage_flavor) + '|" ' + file_path,
                'sed -i "s|OvercloudBlockStorageFlavor:.*|OvercloudBlockStorageFlavor: '
                + str(block_storage_flavor) + '|" ' + file_path,
                'sed -i "s|NeutronNetworkVLANRanges:.*|NeutronNetworkVLANRanges: '
                + 'physint:' + str(vlan_range) + ',physext'
                '|" ' + file_path))

            if enable_hugepage:
                hpg_num = ConfigOvercloud.calculate_hugepage_count(
                    hugepage_size)
                # Kernel boot arguments enabling hugepages and the IOMMU.
                hugecmd = 'default_hugepagesz=' + \
                    hugepage_size + ' hugepagesz=' + \
                    hugepage_size[0:-1]+' hugepages=' + \
                    str(hpg_num)+' iommu=pt intel_iommu=on'
                if not ovs_dpdk:
                    cmds.append('sed -i "s|HugepagesEnable.*|' +
                                'HugepagesEnable: true|" ' + file_path)
                    cmds.append("sed -i 's|HugePages:.*|HugePages: \"" +
                                hugecmd + "\"|' " + file_path)

            if enable_numa:
                # Populates the HOST_OS_CPUS/VCPUS/TOTAL_CPUS globals.
                ConfigOvercloud.calculate_hostos_cpus(hostos_cpu_count)
                if not ovs_dpdk:
                    cmds.append('sed -i "s|NumaEnable:.*|NumaEnable: true|" ' +
                                file_path)
                    cmds.append("sed -i 's|NumaCpus:.*|NumaCpus: " + VCPUS +
                                "|' " + file_path)
                    cmds.append('sed -i "s|  # NovaVcpuPinSet|  ' +
                                'NovaVcpuPinSet|" ' + file_path)
                    cmds.append(
                        "sed -i 's|NovaVcpuPinSet:.*|NovaVcpuPinSet: \"" +
                        VCPUS + "\"|' " + file_path)
            if ovs_dpdk:
                # The DPDK mode (1 or 2) is encoded in the nic_env_file
                # name, e.g. "..._mode1/...".
                for each in re.split(r'[_/]', nic_env_file):
                    if each.find('mode') != -1:
                        ovs_dpdk_mode = each[-1:]
                siblings_info = cpu_siblings.sibling_info[TOTAL_CPUS][int(
                    hostos_cpu_count)]
                if ovs_dpdk_mode == '1':
                    pmd_cores = siblings_info["mode1_pmd_cores"]
                    pmd_rem_cores = siblings_info["mode1_rem_cores"]
                else:
                    pmd_cores = siblings_info["mode2_pmd_cores"]
                    pmd_rem_cores = siblings_info["mode2_rem_cores"]
                cmds.append(
                    'sed -i "s|NeutronDpdkCoreList:.*|NeutronDpdkCoreList: \\"'
                    + pmd_cores.join(["'", "'"]) + '\\" |" ' + dpdk_file)
                cmds.append('sed -i "s|PmdRemCores:.*|PmdRemCores: "' +
                            pmd_rem_cores + '"|" ' + dpdk_file)
                cmds.append("sed -i 's|HugePages:.*|HugePages: \"" + hugecmd +
                            "\"|' " + dpdk_file)
                cmds += [
                    'sed -i "s|HostOsCpus:.*|HostOsCpus: "' + HOST_OS_CPUS +
                    '"|" ' + dpdk_file,
                    'sed -i "s|VcpuPinSet:.*|VcpuPinSet: "' + VCPUS + '"|" ' +
                    dpdk_file,
                ]

            # Performance and Optimization
            if innodb_buffer_pool_size != "dynamic":
                BufferPoolSize = int(innodb_buffer_pool_size.replace(
                    "G", "")) * 1024
                memory_mb = ConfigOvercloud.get_minimum_memory_size("control")
                if memory_mb < BufferPoolSize:
                    raise Exception("innodb_buffer_pool_size is greater than"
                                    " available memory size")
            cmds.append(
                'sed -i "s|MysqlMaxConnections.*|MysqlMaxConnections: ' +
                mariadb_max_connections + '|" ' + file_path)
            if ovs_dpdk:
                f_path = dpdk_file
            else:
                f_path = file_path
            cmds.append('sed -i "s|BufferPoolSize.*|BufferPoolSize: ' +
                        innodb_buffer_pool_size + '|" ' + f_path)
            cmds.append(
                'sed -i "s|BufferPoolInstances.*|BufferPoolInstances: ' +
                innodb_buffer_pool_instances + '|" ' + f_path)

            for cmd in cmds:
                status = os.system(cmd)
                if status != 0:
                    raise Exception("Failed to execute the command {}"
                                    " with error code {}".format(cmd, status))
                logger.debug("cmd: {}".format(cmd))

        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            logger.error(message)
            raise Exception("Failed to modify the dell_environment.yaml"
                            " at location {}".format(file_path))

    def get_dell_compute_nodes_hostnames(self, nova):
        """Return the hostnames of nova hosts whose name contains
        "dell-compute".

        Raises Exception if the nova host listing fails.
        """
        try:
            logger.info("Getting dellnfv compute node hostnames")
            # Create host object
            host_obj = hosts.HostManager(nova)

            # Get list of dell nfv nodes
            dell_hosts = []

            for host in host_obj.list():
                if "dell-compute" in host.host_name:
                    hostname = str(host.host_name)
                    dell_hosts.append(hostname)
            return dell_hosts
        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            logger.error(message)
            raise Exception("Failed to get the Dell Compute nodes.")

    def edit_aggregate_environment_file(self, hostname_list):
        """Write hostname_list into the "hosts:" line of
        create_aggregate_environment.yaml via sed.

        Raises Exception if the template is missing or sed fails.
        """
        logger.info("Editing create aggregate environment file")
        file_path = home_dir \
            + '/pilot/templates/create_aggregate_environment.yaml'
        if not os.path.isfile(file_path):
            raise Exception(
                "The create_aggregate_environment.yaml file does not exist")
        cmd = ('sed -i "s|hosts:.*|hosts: ' + str(hostname_list) + '|" ' +
               file_path)

        status = os.system(cmd)
        logger.info("cmd: {}".format(cmd))
        if status != 0:
            raise Exception("Failed to execute the command {}"
                            " with error code {}".format(cmd, status))

    def create_aggregate(self):
        """Create the Dell_Aggregate heat stack containing the
        dell-compute hosts.

        Raises Exception if the stack creation command fails.
        """
        UC_AUTH_URL, UC_PROJECT_ID, UC_USERNAME, UC_PASSWORD = \
            CredentialHelper.get_overcloud_creds()
        # Create nova client object
        nova = nvclient.Client(2, UC_USERNAME, UC_PASSWORD, UC_PROJECT_ID,
                               UC_AUTH_URL)
        hostname_list = self.get_dell_compute_nodes_hostnames(nova)
        self.edit_aggregate_environment_file(hostname_list)
        env_opts = \
            " -e ~/pilot/templates/create_aggregate_environment.yaml"

        cmd = self.overcloudrc + "openstack stack create " \
            " Dell_Aggregate" \
            " --template" \
            " ~/pilot/templates/createaggregate.yaml" \
            " {}" \
            "".format(env_opts)
        aggregate_create_status = os.system(cmd)
        if aggregate_create_status == 0:
            logger.info("Dell_Aggregate created")
        else:
            # The original message carried an unformatted "{}" placeholder.
            raise Exception("Aggregate {} could not be created..."
                            " Exiting post deployment tasks".format(
                                "Dell_Aggregate"))

    def post_deployment_tasks(self):
        """Run the post-deployment steps (currently: create the
        Dell_Aggregate host aggregate)."""
        try:
            logger.info("Initiating post deployment tasks")
            # create aggregate
            self.create_aggregate()
        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            raise Exception(message)
Пример #13
0
    def build_node_list(self):
        """Populate self.nodes with every node to operate on.

        First adds the statically-defined nodes from
        self.network_config["nodes"] (sorted by name so the
        SAH/Director/Dashboard nodes come first), then discovers the
        overcloud nodes via nova, resolving each server's role/flavor name
        either from its nova flavor or from the ironic "node:<role>-<N>"
        capability, and appends them sorted by name.

        Exits the process if a server's flavor name cannot be determined.
        """
        self.nodes = []

        # Pull in the nodes that nova doesn't know about in our json file
        for server_name in self.network_config["nodes"].keys():
            server = self.network_config["nodes"][server_name]
            node = self.Node(server_name, server["ip"], server["user"],
                             server["networks"])

            self.nodes.append(node)

        # Sort just these by name so the SAH/Director/Dashboard nodes come
        # first
        self.nodes.sort(key=lambda n: n.name)

        os_auth_url, os_tenant_name, os_username, os_password, \
            os_user_domain_name, os_project_domain_name = \
            CredentialHelper.get_undercloud_creds()
        # NOTE(review): assumes os_auth_url ends with "/"; a sibling
        # routine appends "/v3" instead -- confirm which form the
        # undercloud creds use.
        auth_url = os_auth_url + "v3"

        auth = v3.Password(auth_url=auth_url,
                           username=os_username,
                           password=os_password,
                           project_name=os_tenant_name,
                           user_domain_name=os_user_domain_name,
                           project_domain_name=os_project_domain_name)

        sess = session.Session(auth=auth)
        nova = novaclient.Client('2', session=sess)

        ironic = IronicHelper.get_ironic_client()

        # Build up a map that maps flavor ids to flavor names
        flavor_map = {}
        flavors = nova.flavors.list(detailed=False)
        for flavor in flavors:
            flavor_map[flavor.id] = flavor.name

        logger.debug("flavor_map is:")
        for flavor_id, name in flavor_map.items():
            logger.debug("    " + flavor_id + " => " + name)

        # Get the nodes from nova
        tmp_nodes = []
        nova_servers = nova.servers.list()
        for nova_server in nova_servers:
            flavor_name = None
            if nova_server.flavor["id"]:
                flavor_name = flavor_map[nova_server.flavor["id"]]
                if flavor_name == "baremetal":
                    flavor_name = None

            if not flavor_name:
                # Node placement is in use; recover the role name from the
                # ironic node's "node:<role>-<N>" capability instead.
                ironic_server = ironic.node.get_by_instance_uuid(
                    nova_server.id)
                capabilities = ironic_server.properties["capabilities"]

                match = re.search(r"node:([a-zA-Z-]+)-\d+", capabilities)
                if match:
                    flavor_name = match.group(1)
                else:
                    logger.error("Unable to find flavor name for "
                                 "node {}".format(nova_server.name))
                    sys.exit(1)

            # From the flavor, get the networks
            networks = self.network_config["flavors_to_networks"][flavor_name]

            node = self.Node(nova_server.name,
                             nova_server.networks["ctlplane"][0], "heat-admin",
                             networks)
            tmp_nodes.append(node)

        # Sort the overcloud nodes by name to group the role types together
        tmp_nodes.sort(key=lambda n: n.name)
        self.nodes.extend(tmp_nodes)
Пример #14
0
    def _get_nodes(self):
        """Build self.node_roles_to_nodes: a map from node role (e.g.
        "controller", "compute") to the list of provisioning-network IPs
        of the nodes holding that role, by joining ironic node records
        with their nova server instances.

        Ironic nodes with no nova instance (maintenance mode, unassigned,
        etc.) are skipped; exits the process if a node's role cannot be
        determined from its capabilities.
        """
        os_auth_url, os_tenant_name, os_username, os_password, \
            os_user_domain_name, os_project_domain_name = \
            CredentialHelper.get_undercloud_creds()
        auth_url = os_auth_url + "/v3"

        provisioning_network = NetworkHelper.get_provisioning_network()

        # NOTE(review): this dict is built but never passed to anything
        # in the visible code -- likely dead code left from an older
        # client-construction style; confirm before removing.
        kwargs = {'os_username': os_username,
              'os_password': os_password,
              'os_auth_url': os_auth_url,
              'os_tenant_name': os_tenant_name,
              'os_user_domain_name': os_user_domain_name,
              'os_project_domain_name': os_project_domain_name}
        i_client = IronicHelper.get_ironic_client()

        # Keystone v3 password auth for the nova client below.
        auth = v3.Password(
            auth_url=auth_url,
            username=os_username,
            password=os_password,
            project_name=os_tenant_name,
            user_domain_name=os_user_domain_name,
            project_domain_name=os_project_domain_name
        )

        sess = session.Session(auth=auth)
        n_client = nova_client.Client(2, session=sess)

        # Build up a dictionary that maps roles to a list of IPs for that role
        self.node_roles_to_nodes = {}

        self.logger.debug("Querying ironic and nova for nodes")
        # Only the fields needed for the role/IP join are requested.
        nodes = i_client.node.list(fields=["uuid", "instance_uuid",
                                           "properties"])
        for node in nodes:
            uuid = node.uuid
            instance_uuid = node.instance_uuid

            # Handle the case where we have a node in ironic that's not in nova
            # (possibly due to the node being in maintenance mode in ironic or
            #  the user not assigning a role to a node, etc)
            if instance_uuid is None:
                self.logger.debug("Ironic node " + uuid + " has no "
                                  "corresponding instance in nova.  Skipping")
                continue

            # Capabilities arrive as "k1:v1,k2:v2,..."; parse into a dict.
            capabilities = node.properties["capabilities"]
            capabilities = dict(c.split(':') for c in capabilities.split(','))

            # Role is the 'profile' capability when node placement is not
            # in use. Otherwise it's encoded in the 'node' capability.
            if 'profile' in capabilities:
                role = capabilities['profile']
            elif 'node' in capabilities:
                role = capabilities['node']
                # Trim the trailing "-N" where N is the node number
                role = role[:role.rindex('-')]
            else:
                self.logger.error("Failed to determine role of node {}".format(
                    node))
                sys.exit(1)

            # Pick the server's address that lies on the provisioning
            # network.
            # NOTE(review): if no ctlplane address is on the provisioning
            # network, "ip" keeps the last address examined (or is unbound
            # when the list is empty) -- confirm that case cannot occur.
            server = n_client.servers.get(instance_uuid)
            for address in server.addresses["ctlplane"]:
                ip = address["addr"]
                if IPAddress(ip) in provisioning_network:
                    break

            self.logger.debug("Got node:\n"
                              "    uuid=" + uuid + "\n"
                              "    ip=" + ip + "\n"
                              "    role=" + role + "\n"
                              "    instance_uuid=" + instance_uuid)

            if role not in self.node_roles_to_nodes:
                self.node_roles_to_nodes[role] = []

            self.node_roles_to_nodes[role].append(ip)

        self.logger.debug("node_roles_to_nodes: " +
                          str(self.node_roles_to_nodes))