Example #1
0
def get_ceph_nodes(username):
    """ Returns a list of Ceph nodes (Monitors and OSD nodes)

    Scans the list of servers in the overcloud, and determines whether a
    server is a "Ceph node" by searching for the presence of specific Ceph
    process names.

    :param username: the user name used to log in to each overcloud node
    :returns: list of Node objects, sorted by FQDN
    """

    LOG.info("Identifying Ceph nodes (Monitor and OSD nodes)")

    os_auth_url, os_tenant_name, os_username, os_password = \
        CredentialHelper.get_undercloud_creds()

    nova = nova_client.Client(2, os_username, os_password, os_tenant_name,
                              os_auth_url)

    ceph_nodes = []
    for server in nova.servers.list():
        # Use the first "ctlplane" (provisioning network) address
        address = server.addresses["ctlplane"][0]["addr"]
        node = Node(address, username)
        node.initialize()

        # Identify Ceph nodes by looking for Ceph monitor or OSD processes.
        # If there are none then it's not a Ceph node.
        # Note: raw string + ERE alternation group.  The previous pattern,
        # "ceph-[mon\|osd]", was a single-character class (containing an
        # invalid Python escape sequence) that could also match unrelated
        # "ceph-*" processes such as ceph-mgr.
        ceph_procs = node.run(r"pgrep -l 'ceph-(mon|osd)'",
                              check_status=False)
        if ceph_procs:
            LOG.info("{} ({}) is a Ceph node".format(node.fqdn,
                                                     node.storage_ip))
            ceph_nodes.append(node)
        else:
            LOG.debug("{} ({}) is not a Ceph node".format(
                node.fqdn, node.storage_ip))

    return sorted(ceph_nodes, key=lambda node: node.fqdn)
Example #2
0
def main():
    """ Controls the power state of every overcloud node via IPMI.

    Looks up the iDRAC credentials for each node registered in ironic and
    issues an "ipmitool chassis power <state>" command against it.
    """
    parser = argparse.ArgumentParser()
    # "default" is meaningless when required=True, so it was dropped
    parser.add_argument("--power",
                        required=True,
                        choices=["on", "off", "reset", "cycle"],
                        help="Control power state of all overcloud nodes")
    args = parser.parse_args()

    os_auth_url, os_tenant_name, os_username, os_password, \
    os_user_domain_name, os_project_domain_name = \
        CredentialHelper.get_undercloud_creds()

    kwargs = {
        'os_username': os_username,
        'os_password': os_password,
        'os_auth_url': os_auth_url,
        'os_tenant_name': os_tenant_name,
        'os_user_domain_name': os_user_domain_name,
        'os_project_domain_name': os_project_domain_name
    }
    ironic = client.get_client(1, **kwargs)

    for node in ironic.node.list(detail=True):
        ip, username, password = \
            CredentialHelper.get_drac_creds_from_node(node)

        # NOTE(review): the command (including the iDRAC password) is
        # echoed to stdout — confirm that is acceptable for this tool
        cmd = "ipmitool -H {} -I lanplus -U {} -P '{}' chassis power {}". \
            format(ip, username, password, args.power)
        # Parenthesized single-argument print works on Python 2 and 3
        print(cmd)
        os.system(cmd)
Example #3
0
    def get_ironic_client():
        """ Creates and returns an ironic API client for the undercloud.

        Uses the undercloud credentials and pins the ironic API
        microversion to 1.15.
        """
        creds = CredentialHelper.get_undercloud_creds()
        os_auth_url, os_tenant_name, os_username, os_password = creds

        client_kwargs = {
            'os_username': os_username,
            'os_password': os_password,
            'os_auth_url': os_auth_url,
            'os_tenant_name': os_tenant_name,
            'os_ironic_api_version': '1.15',
        }

        return ironicclient.client.get_client(1, **client_kwargs)
Example #4
0
def get_ceph_nodes(username):
    """ Returns a list of Ceph nodes (Monitors and OSD nodes)

    Scans the list of servers in the overcloud, and determines whether a
    server is a "Ceph node" by searching for the presence of specific Ceph
    process names.

    :param username: the user name used to log in to each overcloud node
    :returns: list of Node objects, sorted by (short) FQDN
    """

    LOG.info("Identifying Ceph nodes (Monitor and OSD nodes)")

    os_auth_url, os_tenant_name, os_username, os_password, \
        os_user_domain_name, os_project_domain_name = \
        CredentialHelper.get_undercloud_creds()
    auth_url = os_auth_url + "v3"

    auth = v3.Password(auth_url=auth_url,
                       username=os_username,
                       password=os_password,
                       project_name=os_tenant_name,
                       user_domain_name=os_user_domain_name,
                       project_domain_name=os_project_domain_name)

    sess = session.Session(auth=auth)
    nova = nova_client.Client(2, session=sess)

    ceph_nodes = []
    for server in nova.servers.list():
        # Use the first "ctlplane" (provisioning network) address
        address = server.addresses["ctlplane"][0]["addr"]
        node = Node(address, username)
        node.initialize()

        # Identify Ceph nodes by looking for Ceph monitor or OSD processes.
        # If there are none then it's not a Ceph node.
        # Note: raw string + ERE alternation group.  The previous pattern,
        # "ceph-[mon\|osd]", was a single-character class (containing an
        # invalid Python escape sequence) that could also match unrelated
        # "ceph-*" processes such as ceph-mgr.
        ceph_procs = node.run(
            r"pgrep -l 'ceph-(mon|osd)'",
            check_status=False)
        if ceph_procs:
            # Keep only the short host name — the domain part is never
            # used to configure the dashboard.  This replaces the old
            # append-".mydomain"-then-split dance, which had exactly the
            # same net effect (prefix before the first dot, or the whole
            # name if it contained no dot).
            node.fqdn = node.fqdn.split('.', 1)[0]
            LOG.info("{} ({}) is a Ceph node".format(node.fqdn,
                                                     node.storage_ip))
            ceph_nodes.append(node)
        else:
            LOG.debug("{} ({}) is not a Ceph node".format(
                node.fqdn, node.storage_ip))

    return sorted(ceph_nodes, key=lambda node: node.fqdn)
Example #5
0
def main():
    """ Prints a table of overcloud nodes.

    For each node registered in ironic, displays the iDRAC address, the
    nova instance display name, and the provisioning ("ctlplane") address,
    ordered by iDRAC address.
    """
    os_auth_url, os_tenant_name, os_username, os_password, \
        os_user_domain_name, os_project_domain_name = \
        CredentialHelper.get_undercloud_creds()
    # NOTE(review): assumes os_auth_url ends with "/" — confirm
    auth_url = os_auth_url + "v3"

    kwargs = {'os_username': os_username,
              'os_password': os_password,
              'os_auth_url': os_auth_url,
              'os_tenant_name': os_tenant_name,
              'os_user_domain_name': os_user_domain_name,
              'os_project_domain_name': os_project_domain_name}
    ironic = ironicclient.client.get_client(1, **kwargs)
    nodes = ironic.node.list(detail=True)

    auth = v3.Password(
        auth_url=auth_url,
        username=os_username,
        password=os_password,
        project_name=os_tenant_name,
        user_domain_name=os_user_domain_name,
        project_domain_name=os_project_domain_name
    )

    sess = session.Session(auth=auth)
    nova = novaclient.Client('2', session=sess)

    # Slightly odd syntax for declaring 'banner' reduces the line length
    banner = (
        "+-----------------+---------------------------+-----------------+"
    )
    nodeinfo = "| {:<15} | {:<25} | {:<15} |"
    # Parenthesized single-argument print works on Python 2 and 3;
    # the bare Python 2 print statement did not
    print(banner)
    print(nodeinfo.format('iDRAC Addr', 'Node Name', 'Provision Addr'))
    print(banner)
    # Display the list ordered by the iDRAC address
    for n in sorted(nodes, key=lambda x: CredentialHelper.get_drac_ip(x)):
        idrac_addr = CredentialHelper.get_drac_ip(n)

        if 'display_name' in n.instance_info:
            node_name = n.instance_info['display_name']
        else:
            node_name = 'None'

        prov_addr = 'None'
        if n.instance_uuid:
            nova_ips = nova.servers.ips(n.instance_uuid)
            if nova_ips and 'ctlplane' in nova_ips:
                prov_addr = nova_ips['ctlplane'][0]['addr']

        print(nodeinfo.format(idrac_addr, node_name, prov_addr))
    print(banner)
def main():
    """ Prepares all overcloud nodes for PXE (re)provisioning.

    Powers off every node registered in ironic and makes PXE the
    persistent first boot device, then (unless --skip was given) assigns
    the kernel and ramdisk images to all nodes.
    """
    args = parse_arguments()

    LoggingHelper.configure_logging(args.logging_level)

    ironic_client = IronicHelper.get_ironic_client()

    for node in ironic_client.node.list(detail=True):
        drac_ip, drac_user, drac_password = \
            CredentialHelper.get_drac_creds_from_node(node)

        # Common prefix shared by both IPMI chassis commands below
        ipmi_prefix = ("ipmitool -H {} -I lanplus -U {} -P '{}' "
                       "chassis ").format(drac_ip, drac_user, drac_password)

        # Power off the node
        power_off_cmd = ipmi_prefix + "power off"
        logger.info("Powering off {}".format(drac_ip))
        logger.debug("    {}".format(power_off_cmd))
        os.system(power_off_cmd)

        # Set the first boot device to PXE
        bootdev_cmd = ipmi_prefix + "bootdev pxe options=persistent"
        logger.info(
            "Setting the provisioning NIC to PXE boot on {}".format(drac_ip))
        logger.debug("    {}".format(bootdev_cmd))
        os.system(bootdev_cmd)

    if not args.skip:
        creds = CredentialHelper.get_undercloud_creds()
        os_auth_url, os_tenant_name, os_username, os_password = creds

        configure_cmd = ("openstack baremetal configure boot "
                         "--os-auth-url {} "
                         "--os-project-name {} "
                         "--os-username {} "
                         "--os-password {} ").format(os_auth_url,
                                                     os_tenant_name,
                                                     os_username,
                                                     os_password)

        logger.info("Assigning the kernel and ramdisk image to all nodes")
        logger.debug(configure_cmd)
        os.system(configure_cmd)
Example #7
0
    def get_inspector_client(self):
        """ Creates an ironic-inspector client for the undercloud.

        Authenticates against keystone v3 with the undercloud credentials
        and stores the resulting client on self.inspector.
        """
        os_auth_url, os_tenant_name, os_username, os_password, \
            os_user_domain_name, os_project_domain_name = \
            CredentialHelper.get_undercloud_creds()
        # NOTE(review): assumes os_auth_url ends with "/" — confirm
        auth_url = os_auth_url + "v3"

        # The previously-built "kwargs" dict was never used and has been
        # removed; the session below carries all authentication state.
        auth = v3.Password(auth_url=auth_url,
                           username=os_username,
                           password=os_password,
                           project_name=os_tenant_name,
                           user_domain_name=os_user_domain_name,
                           project_domain_name=os_project_domain_name)
        sess = session.Session(auth=auth)
        self.inspector = ironic_inspector_client.ClientV1(session=sess)
Example #8
0
    def _get_nodes(self):
        """ Builds self.node_roles_to_nodes.

        Queries ironic for every registered node and nova for the matching
        instance, determines each node's role from its ironic
        "capabilities" property, and records the node's provisioning
        ("ctlplane") IP under that role in the
        self.node_roles_to_nodes dict (role name -> list of IPs).

        Exits the process with status 1 if a node's role cannot be
        determined.
        """
        os_auth_url, os_tenant_name, os_username, os_password, \
            os_user_domain_name, os_project_domain_name = \
            CredentialHelper.get_undercloud_creds()
        # NOTE(review): assumes os_auth_url ends with "/" — confirm
        auth_url = os_auth_url + "v3"

        provisioning_network = NetworkHelper.get_provisioning_network()

        # Credentials for the ironic client (keyword style expected by
        # ironic_client.get_client)
        kwargs = {
            'os_username': os_username,
            'os_password': os_password,
            'os_auth_url': os_auth_url,
            'os_tenant_name': os_tenant_name,
            'os_user_domain_name': os_user_domain_name,
            'os_project_domain_name': os_project_domain_name
        }
        i_client = ironic_client.get_client(1, **kwargs)

        # Separate keystone v3 session for the nova client
        auth = v3.Password(auth_url=auth_url,
                           username=os_username,
                           password=os_password,
                           project_name=os_tenant_name,
                           user_domain_name=os_user_domain_name,
                           project_domain_name=os_project_domain_name)

        sess = session.Session(auth=auth)
        n_client = nova_client.Client(2, session=sess)

        # Build up a dictionary that maps roles to a list of IPs for that role
        self.node_roles_to_nodes = {}

        self.logger.debug("Querying ironic and nova for nodes")
        nodes = i_client.node.list(
            fields=["uuid", "instance_uuid", "properties"])
        for node in nodes:
            uuid = node.uuid
            instance_uuid = node.instance_uuid

            # Handle the case where we have a node in ironic that's not in nova
            # (possibly due to the node being in maintenance mode in ironic or
            #  the user not assigning a role to a node, etc)
            if instance_uuid is None:
                self.logger.debug("Ironic node " + uuid + " has no "
                                  "corresponding instance in nova.  Skipping")
                continue

            # "capabilities" is a comma-separated list of key:value pairs,
            # e.g. "profile:control,boot_option:local"
            capabilities = node.properties["capabilities"]
            capabilities = dict(c.split(':') for c in capabilities.split(','))

            # Role is the 'profile' capability when node placement is not
            # in use. Otherwise it's encoded in the 'node' capability.
            if 'profile' in capabilities:
                role = capabilities['profile']
            elif 'node' in capabilities:
                role = capabilities['node']
                # Trim the trailing "-N" where N is the node number
                role = role[:role.rindex('-')]
            else:
                self.logger.error(
                    "Failed to determine role of node {}".format(node))
                sys.exit(1)

            # Find this instance's address on the provisioning network.
            # NOTE(review): if no address matches, "ip" is left as the last
            # address examined (or a previous iteration's value) — confirm
            # every instance is expected to have a provisioning address.
            server = n_client.servers.get(instance_uuid)
            for address in server.addresses["ctlplane"]:
                ip = address["addr"]
                if IPAddress(ip) in provisioning_network:
                    break

            self.logger.debug("Got node:\n"
                              "    uuid=" + uuid + "\n"
                              "    ip=" + ip + "\n"
                              "    role=" + role + "\n"
                              "    instance_uuid=" + instance_uuid)

            if role not in self.node_roles_to_nodes:
                self.node_roles_to_nodes[role] = []

            self.node_roles_to_nodes[role].append(ip)

        self.logger.debug("node_roles_to_nodes: " +
                          str(self.node_roles_to_nodes))
Example #9
0
def main():
    """ Deploys the overcloud.

    Parses and validates the command-line arguments, edits the Dell NFV
    environment files accordingly, assembles the "openstack overcloud
    deploy" command with the required environment files (in dependency
    order), runs it, and performs post-deployment steps (SSH key fetch,
    node identification, Horizon URL display).

    Exits with status 1 on any error.
    """
    try:
        global args
        parser = argparse.ArgumentParser()
        parser.add_argument("--controllers",
                            dest="num_controllers",
                            type=int,
                            default=3,
                            help="The number of controller nodes")
        parser.add_argument("--dell-computes",
                            dest="num_dell_computes",
                            type=int,
                            required=True,
                            help="The number of dell compute nodes")
        parser.add_argument("--storage",
                            dest="num_storage",
                            type=int,
                            required=True,
                            help="The number of storage nodes")

        parser.add_argument("--enable_hugepages",
                            action='store_true',
                            default=False,
                            help="Enable/Disable hugepages feature")
        parser.add_argument("--enable_numa",
                            action='store_true',
                            default=False,
                            help="Enable/Disable numa feature")
        parser.add_argument("--vlans",
                            dest="vlan_range",
                            required=True,
                            help="The VLAN range to use for Neutron in "
                            " xxx:yyy format")
        parser.add_argument("--nic_env_file",
                            default="5_port/nic_environment.yaml",
                            help="The NIC environment file to use")
        parser.add_argument("--ntp",
                            dest="ntp_server_fqdn",
                            default="0.centos.pool.ntp.org",
                            help="The FQDN of the ntp server to use")
        parser.add_argument("--timeout",
                            default="120",
                            help="The amount of time in minutes to allow the "
                            "overcloud to deploy")
        parser.add_argument("--overcloud_name",
                            default=None,
                            help="The name of the overcloud")
        parser.add_argument("--hugepages_size",
                            dest="hugepages_size",
                            required=False,
                            default="1GB",
                            help="HugePages size")
        parser.add_argument("--hostos_cpu_count",
                            dest="hostos_cpu_count",
                            required=False,
                            default="4",
                            help="HostOs Cpus to be configured")
        parser.add_argument("--mariadb_max_connections",
                            dest="mariadb_max_connections",
                            required=False,
                            default="15360",
                            help="Maximum number of connections for MariaDB")
        parser.add_argument("--innodb_buffer_pool_size",
                            dest="innodb_buffer_pool_size",
                            required=False,
                            default="dynamic",
                            help="InnoDB buffer pool size")
        parser.add_argument("--innodb_buffer_pool_instances",
                            dest="innodb_buffer_pool_instances",
                            required=False,
                            default="16",
                            help="InnoDB buffer pool instances.")
        parser.add_argument('--enable_dellsc',
                            action='store_true',
                            default=False,
                            help="Enable cinder Dell Storage Center backend")
        parser.add_argument('--disable_rbd',
                            action='store_true',
                            default=False,
                            help="Disable cinder Ceph and rbd backend")
        parser.add_argument('--dvr_enable',
                            action='store_true',
                            default=False,
                            help="Enables Distributed Virtual Routing")
        parser.add_argument('--static_ips',
                            action='store_true',
                            default=False,
                            help="Specify the IPs on the overcloud nodes")
        parser.add_argument('--static_vips',
                            action='store_true',
                            default=False,
                            help="Specify the VIPs for the networks")
        parser.add_argument('--ovs_dpdk',
                            action='store_true',
                            default=False,
                            help="Enable OVS+DPDK")
        parser.add_argument('--sriov',
                            action='store_true',
                            default=False,
                            help="Enable SR-IOV")
        parser.add_argument('--node_placement',
                            action='store_true',
                            default=False,
                            help="Control which physical server is assigned "
                            "which instance")
        parser.add_argument("--debug",
                            default=False,
                            action='store_true',
                            help="Indicates if the deploy-overcloud script "
                            "should be run in debug mode")
        parser.add_argument("--mtu",
                            dest="mtu",
                            type=int,
                            required=True,
                            default=1500,
                            help="Tenant Network MTU")
        LoggingHelper.add_argument(parser)
        args = parser.parse_args()
        LoggingHelper.configure_logging(args.logging_level)
        # Raw string avoids the invalid "\d" escape sequence (W605)
        p = re.compile(r'\d+:\d+')
        if not p.match(args.vlan_range):
            raise ValueError("Error: The VLAN range must be a number followed "
                             "by a colon, followed by another number")
        # NOTE(review): these credentials are not referenced anywhere below
        # in this function — confirm before removing the call (it may have
        # a useful side effect such as validating the stackrc file)
        os_auth_url, os_tenant_name, os_username, os_password, \
            os_user_domain_name, os_project_domain_name = \
            CredentialHelper.get_undercloud_creds()

        # Set up the default flavors
        control_flavor = "control"
        ceph_storage_flavor = "ceph-storage"
        swift_storage_flavor = "swift-storage"
        block_storage_flavor = "block-storage"

        if args.node_placement:
            validate_node_placement()

            # If node-placement is specified, then the baremetal flavor must
            # be used
            control_flavor = BAREMETAL_FLAVOR
            ceph_storage_flavor = BAREMETAL_FLAVOR
            swift_storage_flavor = BAREMETAL_FLAVOR
            block_storage_flavor = BAREMETAL_FLAVOR

        # Validate that the NIC environment file exists
        nic_env_file = os.path.join(home_dir, "pilot/templates/nic-configs",
                                    args.nic_env_file)
        if not os.path.isfile(nic_env_file):
            raise ValueError("\nError: The nic_env_file {} does not "
                             "exist!".format(nic_env_file))

        # Apply any patches required on the Director itself. This is done each
        # time the overcloud is deployed (instead of once, after the Director
        # is installed) in order to ensure an update to the Director doesn't
        # overwrite the patch.
        # logger.info("Applying patches to director...")
        # cmd = os.path.join(home_dir, 'pilot', 'patch-director.sh')
        # status = os.system(cmd)
        # if status != 0:
        #    raise ValueError("\nError: {} failed, unable to continue.  See "
        #                     "the comments in that file for additional "
        #                     "information".format(cmd))
        # Pass the parameters required by puppet which will be used
        # to enable/disable dell nfv features
        # Edit the dellnfv_environment.yaml
        # If disabled, default values will be set and
        # they won't be used for configuration
        # Create ConfigOvercloud object
        config = ConfigOvercloud(args.overcloud_name)
        # Remove this when Numa siblings added
        # Edit the dellnfv_environment.yaml
        config.edit_environment_files(
            args.mtu, args.enable_hugepages, args.enable_numa,
            args.hugepages_size, args.hostos_cpu_count, args.ovs_dpdk,
            args.sriov, nic_env_file, args.mariadb_max_connections,
            args.innodb_buffer_pool_size, args.innodb_buffer_pool_instances,
            args.num_controllers, args.num_storage, control_flavor,
            ceph_storage_flavor, swift_storage_flavor, block_storage_flavor,
            args.vlan_range, args.num_dell_computes)

        # Launch the deployment

        overcloud_name_opt = ""
        if args.overcloud_name is not None:
            overcloud_name_opt = "--stack " + args.overcloud_name

        debug = ""
        if args.debug:
            debug = "--debug"

        # The order of the environment files is important as a later inclusion
        # overrides resources defined in prior inclusions.

        # The roles_data.yaml must be included at the beginning.
        # This is needed to enable the custom role Dell Compute.
        # It overrides the default roles_data.yaml
        env_opts = "-r ~/pilot/templates/roles_data.yaml"

        # The network-environment.yaml must be included after the
        # network-isolation.yaml
        env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                    "network-isolation.yaml" \
                    " -e ~/pilot/templates/network-environment.yaml" \
                    " -e {}" \
                    " -e ~/pilot/templates/ceph-osd-config.yaml" \
                    "".format(nic_env_file)

        # The static-ip-environment.yaml must be included after the
        # network-environment.yaml
        if args.static_ips:
            env_opts += " -e ~/pilot/templates/static-ip-environment.yaml"

        # The static-vip-environment.yaml must be included after the
        # network-environment.yaml
        if args.static_vips:
            env_opts += " -e ~/pilot/templates/static-vip-environment.yaml"

        # The neutron-ovs-dvr.yaml.yaml must be included after the
        # network-environment.yaml
        if args.dvr_enable:
            env_opts += " -e ~/pilot/templates/neutron-ovs-dvr.yaml"

        if args.node_placement:
            env_opts += " -e ~/pilot/templates/node-placement.yaml"

        # The dell-environment.yaml must be included after the
        # storage-environment.yaml and ceph-radosgw.yaml
        env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                    "storage-environment.yaml" \
                    " -e ~/overcloud_images.yaml" \
                    " -e ~/pilot/templates/dell-environment.yaml" \
                    " -e ~/pilot/templates/overcloud/environments/" \
                    "puppet-pacemaker.yaml"
        host_config = False
        if args.enable_hugepages or args.enable_numa:
            env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                        "host-config-and-reboot.yaml"
            host_config = True
        if args.ovs_dpdk:
            if not args.enable_hugepages or not args.enable_numa:
                # Note: a space was missing after "be", which produced the
                # message "...must beenabled..."
                raise ValueError("Both hugepages and numa must be "
                                 "enabled in order to use OVS-DPDK")
            else:
                env_opts += " -e ~/pilot/templates/neutron-ovs-dpdk.yaml"

        if args.sriov:
            env_opts += " -e ~/pilot/templates/neutron-sriov.yaml"
            env_opts += " -e ~/pilot/templates/ovs-hw-offload.yaml"
            if not host_config:
                env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                            "host-config-and-reboot.yaml"

        if args.enable_dellsc:
            env_opts += " -e ~/pilot/templates/dell-cinder-backends.yaml"

        cmd = "cd ;source ~/stackrc; openstack overcloud deploy" \
              " {}" \
              " --log-file ~/pilot/overcloud_deployment.log" \
              " -t {}" \
              " {}" \
              " --templates ~/pilot/templates/overcloud" \
              " -e /usr/share/openstack-tripleo-heat-templates/" \
              "environments/ceph-ansible/ceph-ansible.yaml" \
              " -e /usr/share/openstack-tripleo-heat-templates/" \
              "environments/ceph-ansible/ceph-rgw.yaml" \
              " {}" \
              " --libvirt-type kvm" \
              " --ntp-server {}" \
              "".format(debug,
                        args.timeout,
                        overcloud_name_opt,
                        env_opts,
                        args.ntp_server_fqdn,
                        )

        with open(os.path.join(home_dir, 'pilot', 'overcloud_deploy_cmd.log'),
                  'w') as f:
            f.write(cmd.replace(' -', ' \\\n -'))
            f.write('\n')
        # Parenthesized single-argument print works on Python 2 and 3
        print(cmd)
        start = time.time()
        status = run_deploy_command(cmd)
        end = time.time()
        logger.info('\nExecution time: {} (hh:mm:ss)'.format(
            time.strftime('%H:%M:%S', time.gmtime(end - start))))
        logger.info('Fetching SSH keys...')

        update_ssh_config()
        if status == 0:
            horizon_url = finalize_overcloud()
            logger.info("\nDeployment Completed")
        else:
            horizon_url = None

        logger.info('Overcloud nodes:')
        identify_nodes()

        if horizon_url:
            logger.info('\nHorizon Dashboard URL: {}\n'.format(horizon_url))
    except Exception as err:
        # "print >> sys.stderr, err" is Python 2 only; this form behaves
        # the same on both Python 2 and 3
        sys.stderr.write("{}\n".format(err))
        sys.exit(1)
Example #10
0
def main():
    """Deploy the overcloud.

    Parses the deployment options, validates them, renders the environment
    files via ConfigOvercloud, assembles the ``openstack overcloud deploy``
    command line (the inclusion order of ``-e`` files matters), runs it, and
    reports the resulting Horizon URL.

    Exits the process non-zero (by re-raising) on any failure.
    """
    try:
        global args
        parser = argparse.ArgumentParser()
        parser.add_argument("--controllers",
                            dest="num_controllers",
                            type=int,
                            default=3,
                            help="The number of controller nodes")
        parser.add_argument("--dell-computes",
                            dest="num_dell_computes",
                            type=int,
                            required=True,
                            help="The number of dell compute nodes")
        parser.add_argument("--dell-computeshci",
                            dest="num_dell_computeshci",
                            type=int,
                            required=True,
                            help="The number of dell hci compute nodes")
        parser.add_argument("--storage",
                            dest="num_storage",
                            type=int,
                            required=True,
                            help="The number of storage nodes")
        parser.add_argument("--powerflex",
                            dest="num_powerflex",
                            type=int,
                            required=True,
                            help="The number of powerflex storage nodes")
        parser.add_argument("--enable_hugepages",
                            action='store_true',
                            default=False,
                            help="Enable/Disable hugepages feature")
        parser.add_argument("--enable_numa",
                            action='store_true',
                            default=False,
                            help="Enable/Disable numa feature")
        parser.add_argument("--vlans",
                            dest="vlan_range",
                            required=True,
                            help="The VLAN range to use for Neutron in "
                            " xxx:yyy format")
        parser.add_argument("--nic_env_file",
                            default="5_port/nic_environment.yaml",
                            help="The NIC environment file to use")
        parser.add_argument("--ntp",
                            dest="ntp_server_fqdn",
                            default="0.centos.pool.ntp.org",
                            help="The FQDN of the ntp server to use")
        parser.add_argument("--timezone",
                            dest="time_zone",
                            default="America/Chicago",
                            help="The timezone to use")
        parser.add_argument("--timeout",
                            default="300",
                            help="The amount of time in minutes to allow the "
                            "overcloud to deploy")
        parser.add_argument("--overcloud_name",
                            default=None,
                            help="The name of the overcloud")
        parser.add_argument("--hugepages_size",
                            dest="hugepages_size",
                            required=False,
                            default="1GB",
                            help="HugePages size")
        parser.add_argument("--hostos_cpu_count",
                            dest="hostos_cpu_count",
                            required=False,
                            default="4",
                            help="HostOs Cpus to be configured")
        parser.add_argument('--enable_dellsc',
                            action='store_true',
                            default=False,
                            help="Enable cinder Dell Storage Center backend")
        parser.add_argument('--enable_unity',
                            action='store_true',
                            default=False,
                            help="Enable Dell EMC Unity backend")
        parser.add_argument('--enable_unity_manila',
                            action='store_true',
                            default=False,
                            help="Enable Dell EMC Unity Manila backend")
        parser.add_argument('--enable_powermax',
                            action='store_true',
                            default=False,
                            help="Enable Dell EMC Powermax backend")
        parser.add_argument('--powermax_protocol',
                            dest='powermax_protocol',
                            required=False,
                            default="iSCSI",
                            help="Dell EMC Powermax Protocol - iSCSI or FC")
        parser.add_argument('--enable_powermax_manila',
                            action='store_true',
                            default=False,
                            help="Enable Dell EMC PowerMax  Manila backend")
        parser.add_argument('--enable_powerstore',
                            action='store_true',
                            default=False,
                            help="Enable Dell EMC Powerstore backend")

        parser.add_argument('--disable_rbd',
                            action='store_true',
                            default=False,
                            help="Disable cinder Ceph and rbd backend")
        parser.add_argument('--octavia_enable',
                            action='store_true',
                            default=False,
                            help="Enables Octavia Load Balancer")
        parser.add_argument('--octavia_user_certs_keys',
                            action='store_true',
                            default=False,
                            help="Enables Octavia Load Balancer with "
                            "user provided certs and keys")
        parser.add_argument('--dvr_enable',
                            action='store_true',
                            default=False,
                            help="Enables Distributed Virtual Routing")
        parser.add_argument('--barbican_enable',
                            action='store_true',
                            default=False,
                            help="Enables Barbican key manager")
        parser.add_argument('--static_ips',
                            action='store_true',
                            default=False,
                            help="Specify the IPs on the overcloud nodes")
        parser.add_argument('--static_vips',
                            action='store_true',
                            default=False,
                            help="Specify the VIPs for the networks")
        parser.add_argument('--ovs_dpdk',
                            action='store_true',
                            default=False,
                            help="Enable OVS+DPDK")
        parser.add_argument('--sriov',
                            action='store_true',
                            default=False,
                            help="Enable SR-IOV")
        parser.add_argument('--hw_offload',
                            action='store_true',
                            default=False,
                            help="Enable SR-IOV Offload")
        parser.add_argument('--sriov_interfaces',
                            dest="sriov_interfaces",
                            default=False,
                            help="SR-IOV interfaces count")
        parser.add_argument('--node_placement',
                            action='store_true',
                            default=False,
                            help="Control which physical server is assigned "
                            "which instance")
        parser.add_argument("--debug",
                            default=False,
                            action='store_true',
                            help="Indicates if the deploy-overcloud script "
                            "should be run in debug mode")
        parser.add_argument("--mtu",
                            dest="mtu",
                            type=int,
                            required=True,
                            default=1500,
                            help="Tenant Network MTU")
        parser.add_argument("--dashboard_enable",
                            action='store_true',
                            default=False,
                            help="Enable the ceph dashboard deployment")
        parser.add_argument('--network_data',
                            action='store_true',
                            default=False,
                            help="Use network_data.yaml to create edge site "
                            "networks")

        LoggingHelper.add_argument(parser)
        args = parser.parse_args()
        LoggingHelper.configure_logging(args.logging_level)

        # Raw string keeps "\d" a regex escape instead of a (deprecated)
        # Python string escape, so no noqa suppression is needed.
        p = re.compile(r'\d+:\d+')
        if not p.match(args.vlan_range):
            raise ValueError("Error: The VLAN range must be a number followed "
                             "by a colon, followed by another number")
        os_auth_url, os_tenant_name, os_username, os_password, \
            os_user_domain_name, os_project_domain_name = \
            CredentialHelper.get_undercloud_creds()

        # Set up the default flavors
        control_flavor = "control"
        ceph_storage_flavor = "ceph-storage"
        swift_storage_flavor = "swift-storage"
        block_storage_flavor = "block-storage"

        if args.node_placement:
            validate_node_placement()

            # If node-placement is specified, then the baremetal flavor must
            # be used
            control_flavor = BAREMETAL_FLAVOR
            ceph_storage_flavor = BAREMETAL_FLAVOR
            swift_storage_flavor = BAREMETAL_FLAVOR
            block_storage_flavor = BAREMETAL_FLAVOR

        # Validate that the NIC environment file exists
        nic_env_file = os.path.join(home_dir, "pilot/templates/nic-configs",
                                    args.nic_env_file)
        if not os.path.isfile(nic_env_file):
            raise ValueError("\nError: The nic_env_file {} does not "
                             "exist!".format(nic_env_file))

        # Apply any patches required on the Director itself. This is done each
        # time the overcloud is deployed (instead of once, after the Director
        # is installed) in order to ensure an update to the Director doesn't
        # overwrite the patch.
        # logger.info("Applying patches to director...")
        # cmd = os.path.join(home_dir, 'pilot', 'patch-director.sh')
        # status = os.system(cmd)
        # if status != 0:
        #    raise ValueError("\nError: {} failed, unable to continue.  See "
        #                     "the comments in that file for additional "
        #                     "information".format(cmd))
        # Pass the parameters required by puppet which will be used
        # to enable/disable dell nfv features
        # Edit the dellnfv_environment.yaml
        # If disabled, default values will be set and
        # they won't be used for configuration
        # Create ConfigOvercloud object
        print("Configure environment file")
        config = ConfigOvercloud(args.overcloud_name)
        # Remove this when Numa siblings added
        # Edit the dellnfv_environment.yaml
        config.edit_environment_files(
            args.mtu, args.enable_hugepages, args.enable_numa,
            args.hugepages_size, args.hostos_cpu_count, args.ovs_dpdk,
            args.sriov, args.hw_offload, args.sriov_interfaces, nic_env_file,
            args.num_controllers, args.num_storage, control_flavor,
            ceph_storage_flavor, swift_storage_flavor, block_storage_flavor,
            args.vlan_range, args.time_zone, args.num_dell_computes,
            args.num_dell_computeshci, args.num_powerflex)

        # Launch the deployment
        overcloud_name_opt = ""
        if args.overcloud_name is not None:
            overcloud_name_opt = "--stack " + args.overcloud_name

        debug = ""
        if args.debug:
            debug = "--debug"

        # The order of the environment files is important as a later inclusion
        # overrides resources defined in prior inclusions.

        env_opts = ""
        # If there are edge sites we have to use network_data.yaml and
        # it must be passed in as the first argument.
        if args.network_data:
            env_opts += "-n ~/pilot/templates/network_data.yaml "
        # The roles_data.yaml must be included at the beginning.
        # This is needed to enable the custom role Dell Compute.
        # It overrides the default roles_data.yaml
        env_opts += "-r ~/pilot/templates/roles_data.yaml"

        # The static-ip-environment.yaml must be included after the
        # network-environment.yaml
        if args.static_ips:
            env_opts += " -e ~/pilot/templates/static-ip-environment.yaml"

        # The static-vip-environment.yaml must be included after the
        # network-environment.yaml
        if args.static_vips:
            env_opts += " -e ~/pilot/templates/static-vip-environment.yaml"

        # The configure-barbican.yaml must be included after the
        # network-environment.yaml
        if args.barbican_enable:
            env_opts += " -e ~/pilot/templates/configure-barbican.yaml"

        # The octavia.yaml must be included after the
        # network-environment.yaml
        if args.octavia_enable:
            env_opts += " -e ~/pilot/templates/octavia.yaml"
            if args.octavia_user_certs_keys is True:
                env_opts += " -e ~/pilot/templates/cert_keys.yaml"

        if args.node_placement:
            env_opts += " -e ~/pilot/templates/node-placement.yaml"

        # The neutron-ovs.yaml must be included before dell-environment.yaml to enable ovs and disable ovn
        # in OSP16.1. In case we need to use OVN in future, please delete this line
        env_opts += " -e ~/pilot/templates/overcloud/environments/services/neutron-ovs.yaml"

        # The neutron-ovs-dvr.yaml must be included after the
        # neutron-ovs.yaml
        if args.dvr_enable:
            env_opts += " -e ~/pilot/templates/neutron-ovs-dvr.yaml"

        # The dell-environment.yaml must be included after the
        # storage-environment.yaml and ceph-radosgw.yaml
        if args.num_powerflex > 0:
            env_opts += " -e ~/containers-prepare-parameter.yaml" \
                        " -e ~/pilot/templates/dell-environment.yaml"
        else:
            env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                        "storage-environment.yaml" \
                        " -e ~/containers-prepare-parameter.yaml" \
                        " -e ~/pilot/templates/dell-environment.yaml"

        if args.ovs_dpdk:
            if not args.enable_hugepages or not args.enable_numa:
                raise ValueError("Both hugepages and numa must be" +
                                 "enabled in order to use OVS-DPDK")
            else:
                env_opts += " -e ~/pilot/templates/neutron-ovs-dpdk.yaml"

        if args.sriov:
            if not args.enable_numa:
                raise ValueError("Numa cpu pinning must be " +
                                 "enabled in order to use SRIOV")
            else:
                env_opts += " -e ~/pilot/templates/neutron-sriov.yaml"

        if args.enable_dellsc:
            env_opts += " -e ~/pilot/templates/dellsc-cinder-config.yaml"

        if args.enable_unity:
            env_opts += " -e ~/pilot/templates/dellemc-unity-cinder-" \
                        "container.yaml"
            env_opts += " -e ~/pilot/templates/dellemc-unity-cinder-" \
                        "backend.yaml"

        if args.enable_unity_manila:
            env_opts += " -e ~/pilot/templates/unity-manila-container.yaml"
            env_opts += " -e ~/pilot/templates/unity-manila-config.yaml"

        if args.enable_powermax:
            if args.powermax_protocol == "iSCSI":
                env_opts += " -e ~/pilot/templates/dellemc-powermax-iscsi-cinder-" \
                         "backend.yaml"
            else:
                env_opts += " -e ~/pilot/templates/dellemc-powermax-fc-cinder-" \
                         "backend.yaml"
        if args.enable_powermax_manila:
            env_opts += " -e ~/pilot/templates/powermax-manila-config.yaml"

        if args.enable_powerstore:
            env_opts += " -e ~/pilot/templates/dellemc-powerstore-cinder-backend.yaml"

        if args.num_powerflex > 0:
            env_opts += " -e ~/pilot/templates/overcloud/environments/powerflex-ansible/powerflex-ansible.yaml"
            env_opts += " -e ~/pilot/templates/dellemc-powerflex-cinder-backend.yaml"
            env_opts += " -e ~/pilot/templates/custom-dellemc-volume-mappings.yaml"
        else:
            env_opts += " -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-ansible.yaml" \
                        " -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-rgw.yaml"

        if args.dashboard_enable:
            env_opts += " -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-dashboard.yaml"
            env_opts += " -e ~/pilot/templates/ceph_dashboard_admin.yaml"

        # The network-environment.yaml must be included after other templates
        # for effective parameter overrides (External vlan default route)
        # The network-environment.yaml must be included after the network-isolation.yaml
        env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                    "network-isolation.yaml" \
                    " -e ~/pilot/templates/network-environment.yaml" \
                    " -e {} " \
                    "-e ~/pilot/templates/site-name.yaml".format(nic_env_file)

        cmd = "cd ;source ~/stackrc; openstack overcloud deploy" \
              " {}" \
              " --log-file ~/pilot/overcloud_deployment.log" \
              " -t {}" \
              " {}" \
              " --templates ~/pilot/templates/overcloud" \
              " {}" \
              " --libvirt-type kvm" \
              " --no-cleanup" \
              " --ntp-server {}" \
              "".format(debug,
                        args.timeout,
                        overcloud_name_opt,
                        env_opts,
                        args.ntp_server_fqdn,
                        )

        # Record the exact deploy command (one option per line) for later
        # inspection before running it.
        with open(os.path.join(home_dir, 'pilot', 'overcloud_deploy_cmd.log'),
                  'w') as f:
            f.write(cmd.replace(' -', ' \\\n -'))
            f.write('\n')
        start = time.time()
        status = run_deploy_command(cmd)
        end = time.time()
        logger.info('\nExecution time: {} (hh:mm:ss)'.format(
            time.strftime('%H:%M:%S', time.gmtime(end - start))))
        logger.info('Fetching SSH keys...')

        update_ssh_config()
        if status == 0:
            horizon_url = finalize_overcloud()
            logger.info("\nDeployment Completed")
        else:
            horizon_url = None

        logger.info('Overcloud nodes:')
        identify_nodes()

        if horizon_url:
            logger.info('\nHorizon Dashboard URL: {}\n'.format(horizon_url))
    except Exception as err:
        # Write the error to stderr.  The previous form,
        # print(sys.stderr, err), printed the stream object and the error as
        # a tuple on stdout instead of writing to stderr.
        print(err, file=sys.stderr)
        # Re-raise to preserve the traceback; the interpreter exits non-zero.
        # (A sys.exit(1) after raise would be unreachable.)
        raise
Example #11
0
def main():
    """Deploy the overcloud (legacy Python 2 variant).

    Parses the deployment options, validates the VLAN range, applies
    Director patches, assembles the ``openstack overcloud deploy`` command
    line (the inclusion order of the ``-e`` environment files matters),
    runs it, and prints the resulting Horizon URL.

    NOTE(review): this block uses Python-2-only ``print`` statements and
    ``print >> sys.stderr``; it cannot run under Python 3 as written.
    """
    try:
        global args
        parser = argparse.ArgumentParser()
        parser.add_argument("--controllers",
                            dest="num_controllers",
                            type=int,
                            default=3,
                            help="The number of controller nodes")
        parser.add_argument("--computes",
                            dest="num_computes",
                            type=int,
                            required=True,
                            help="The number of compute nodes")
        parser.add_argument("--storage",
                            dest="num_storage",
                            type=int,
                            required=True,
                            help="The number of storage nodes")
        parser.add_argument("--vlans",
                            dest="vlan_range",
                            required=True,
                            help="The VLAN range to use for Neutron in "
                            " xxx:yyy format")
        parser.add_argument("--ntp",
                            dest="ntp_server_fqdn",
                            default="0.centos.pool.ntp.org",
                            help="The FQDN of the ntp server to use")
        parser.add_argument("--timeout",
                            default="120",
                            help="The amount of time in minutes to allow the "
                            "overcloud to deploy")
        parser.add_argument("--overcloud_name",
                            default=None,
                            help="The name of the overcloud")
        parser.add_argument('--enable_dellsc',
                            action='store_true',
                            default=False,
                            help="Enable cinder Dell Storage Center backend")
        parser.add_argument('--disable_rbd',
                            action='store_true',
                            default=False,
                            help="Disable cinder Ceph and rbd backend")
        parser.add_argument('--static_ips',
                            action='store_true',
                            default=False,
                            help="Specify the IPs on the overcloud nodes")
        parser.add_argument('--static_vips',
                            action='store_true',
                            default=False,
                            help="Specify the VIPs for the networks")
        parser.add_argument('--node_placement',
                            action='store_true',
                            default=False,
                            help="Control which physical server is assigned "
                            "which instance")
        parser.add_argument("--debug",
                            default=False,
                            action='store_true',
                            help="Indicates if the deploy-overcloud script "
                            "should be run in debug mode")
        args = parser.parse_args()
        # Require "<number>:<number>" for the Neutron VLAN range.
        # NOTE(review): non-raw '\d' is a deprecated string escape in
        # Python 3.6+; a raw string (r'\d+:\d+') would be preferred.
        p = re.compile('\d+:\d+')
        if not p.match(args.vlan_range):
            raise ValueError("Error: The VLAN range must be a number followed "
                             "by a colon, followed by another number")

        os_auth_url, os_tenant_name, os_username, os_password = \
            CredentialHelper.get_undercloud_creds()

        # Set up the default flavors
        control_flavor = "control"
        compute_flavor = "compute"
        ceph_storage_flavor = "ceph-storage"
        swift_storage_flavor = "swift-storage"
        block_storage_flavor = "block-storage"

        if args.node_placement:
            validate_node_placement()

            # If node-placement is specified, then the baremetal flavor must
            # be used
            control_flavor = BAREMETAL_FLAVOR
            compute_flavor = BAREMETAL_FLAVOR
            ceph_storage_flavor = BAREMETAL_FLAVOR
            swift_storage_flavor = BAREMETAL_FLAVOR
            block_storage_flavor = BAREMETAL_FLAVOR

        # Apply any patches required on the Director itself. This is done each
        # time the overcloud is deployed (instead of once, after the Director
        # is installed) in order to ensure an update to the Director doesn't
        # overwrite the patch.
        print 'Applying patches to director...'
        cmd = os.path.join(home_dir, 'pilot', 'patch-director.sh')
        status = os.system(cmd)
        if status != 0:
            raise ValueError("\nError: {} failed, unable to continue.  See "
                             "the comments in that file for additional "
                             "information".format(cmd))

        # Launch the deployment

        overcloud_name_opt = ""
        if args.overcloud_name is not None:
            overcloud_name_opt = "--stack " + args.overcloud_name

        debug = ""
        if args.debug:
            debug = "--debug"

        # The order of the environment files is important as a later inclusion
        # overrides resources defined in prior inclusions.

        # The network-environment.yaml must be included after the
        # network-isolation.yaml
        env_opts = "-e ~/pilot/templates/overcloud/environments/" \
                   "network-isolation.yaml" \
                   " -e ~/pilot/templates/network-environment.yaml" \
                   " -e ~/pilot/templates/ceph-osd-config.yaml"

        # The static-ip-environment.yaml must be included after the
        # network-environment.yaml
        if args.static_ips:
            env_opts += " -e ~/pilot/templates/static-ip-environment.yaml"

        # The static-vip-environment.yaml must be included after the
        # network-environment.yaml
        if args.static_vips:
            env_opts += " -e ~/pilot/templates/static-vip-environment.yaml"

        if args.node_placement:
            env_opts += " -e ~/pilot/templates/node-placement.yaml"

        # The dell-environment.yaml must be included after the
        # storage-environment.yaml and ceph-radosgw.yaml
        env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                    "storage-environment.yaml" \
                    " -e ~/pilot/templates/overcloud/environments/" \
                    "ceph-radosgw.yaml" \
                    " -e ~/pilot/templates/dell-environment.yaml" \
                    " -e ~/pilot/templates/overcloud/environments/" \
                    "puppet-pacemaker.yaml"

        if args.enable_dellsc:
            env_opts += " -e ~/pilot/templates/dell-cinder-backends.yaml"

        # Assemble the full deploy command; scale counts and undercloud
        # credentials are passed directly on the command line here (older
        # tripleo CLI style).
        cmd = "cd ; openstack overcloud deploy" \
              " {}" \
              " --log-file ~/pilot/overcloud_deployment.log" \
              " -t {}" \
              " {}" \
              " --templates ~/pilot/templates/overcloud" \
              " {}" \
              " --control-flavor {}" \
              " --compute-flavor {}" \
              " --ceph-storage-flavor {}" \
              " --swift-storage-flavor {}" \
              " --block-storage-flavor {}" \
              " --neutron-public-interface bond1" \
              " --neutron-network-type vlan" \
              " --neutron-disable-tunneling" \
              " --libvirt-type kvm" \
              " --os-auth-url {}" \
              " --os-project-name {}" \
              " --os-user-id {}" \
              " --os-password {}" \
              " --control-scale {}" \
              " --compute-scale {}" \
              " --ceph-storage-scale {}" \
              " --ntp-server {}" \
              " --neutron-network-vlan-ranges physint:{},physext" \
              " --neutron-bridge-mappings physint:br-tenant,physext:br-ex" \
              "".format(debug,
                        args.timeout,
                        overcloud_name_opt,
                        env_opts,
                        control_flavor,
                        compute_flavor,
                        ceph_storage_flavor,
                        swift_storage_flavor,
                        block_storage_flavor,
                        os_auth_url,
                        os_tenant_name,
                        os_username,
                        os_password,
                        args.num_controllers,
                        args.num_computes,
                        args.num_storage,
                        args.ntp_server_fqdn,
                        args.vlan_range)

        # Record the exact deploy command (one option per line) for later
        # inspection before running it.
        with open(os.path.join(home_dir, 'pilot', 'overcloud_deploy_cmd.log'),
                  'w') as f:
            f.write(cmd.replace(' -', ' \\\n -'))
            f.write('\n')
        print cmd
        start = time.time()
        status = run_deploy_command(cmd)
        end = time.time()
        print '\nExecution time: {} (hh:mm:ss)'.format(
            time.strftime('%H:%M:%S', time.gmtime(end - start)))
        print 'Fetching SSH keys...'
        update_ssh_config()
        # Only finalize (and report Horizon) when the deploy succeeded.
        if status == 0:
            horizon_url = finalize_overcloud()
        else:
            horizon_url = None
        print 'Overcloud nodes:'
        identify_nodes()

        if horizon_url:
            print '\nHorizon Dashboard URL: {}\n'.format(horizon_url)
    except ValueError as err:
        print >> sys.stderr, err
        sys.exit(1)
Example #12
0
    def build_node_list(self):
        """Build ``self.nodes``: infrastructure nodes plus overcloud nodes.

        Infrastructure nodes (SAH/Director/Dashboard) come from the JSON
        network config; overcloud nodes come from nova, with the flavor name
        (and hence the networks) resolved via the flavor map or, for
        baremetal deployments, from the ironic node capabilities.

        Exits the process if a node's flavor name cannot be determined.
        """
        self.nodes = []

        # Pull in the nodes that nova doesn't know about in our json file
        for server_name, server in self.network_config["nodes"].items():
            node = self.Node(server_name, server["ip"], server["user"],
                             server["networks"])

            self.nodes.append(node)

        # Sort just these by name so the SAH/Director/Dashboard nodes come
        # first
        self.nodes.sort(key=lambda n: n.name)

        os_auth_url, os_tenant_name, os_username, os_password = \
            CredentialHelper.get_undercloud_creds()

        kwargs = {
            'os_username': os_username,
            'os_password': os_password,
            'os_auth_url': os_auth_url,
            'os_tenant_name': os_tenant_name
        }

        nova = nova_client.Client(
            '2',  # API version
            os_username,
            os_password,
            os_tenant_name,
            os_auth_url)

        ironic = ironicclient.client.get_client(1, **kwargs)

        # Build up a map that maps flavor ids to flavor names
        flavor_map = {}
        flavors = nova.flavors.list()
        for flavor in flavors:
            flavor_map[flavor.id] = flavor.name

        logger.debug("flavor_map is:")
        for flavor_id, flavor_name in flavor_map.items():
            logger.debug("    " + flavor_id + " => " + flavor_name)

        # Get the nodes from nova
        tmp_nodes = []
        nova_servers = nova.servers.list()
        for nova_server in nova_servers:
            flavor_name = None
            if nova_server.flavor["id"]:
                flavor_name = flavor_map[nova_server.flavor["id"]]
                # "baremetal" is a placeholder flavor; fall through to ironic
                # to discover the real role below.
                if flavor_name == "baremetal":
                    flavor_name = None

            if not flavor_name:
                # Recover the role name from the ironic node's capabilities,
                # e.g. "node:controller-0" => "controller".
                ironic_server = ironic.node.get_by_instance_uuid(
                    nova_server.id)
                capabilities = ironic_server.properties["capabilities"]

                # Raw string keeps "\d" a regex escape instead of a
                # (deprecated) Python string escape.
                match = re.search(r"node:([a-zA-Z-]+)-\d+", capabilities)
                if match:
                    flavor_name = match.group(1)
                else:
                    logger.error("Unable to find flavor name for "
                                 "node {}".format(nova_server.name))
                    sys.exit(1)

            # From the flavor, get the networks
            networks = self.network_config["flavors_to_networks"][flavor_name]

            node = self.Node(nova_server.name,
                             nova_server.networks["ctlplane"][0], "heat-admin",
                             networks)
            tmp_nodes.append(node)

        # Sort the overcloud nodes by name to group the role types together
        tmp_nodes.sort(key=lambda n: n.name)
        self.nodes.extend(tmp_nodes)
Example #13
0
def create_flavors():
    """Create the standard m1.* overcloud flavors if they don't exist.

    Connects to nova with the undercloud credentials and creates each of
    m1.tiny through m1.xlarge (fixed ids 1-5), skipping any flavor whose id
    is already present.
    """
    logger.info("Creating overcloud flavors...")

    # Fixed flavor ids keep the result idempotent across reruns.
    flavors = [{
        "id": "1",
        "name": "m1.tiny",
        "memory": 512,
        "disk": 1,
        "cpus": 1
    }, {
        "id": "2",
        "name": "m1.small",
        "memory": 2048,
        "disk": 20,
        "cpus": 1
    }, {
        "id": "3",
        "name": "m1.medium",
        "memory": 4096,
        "disk": 40,
        "cpus": 2
    }, {
        "id": "4",
        "name": "m1.large",
        "memory": 8192,
        "disk": 80,
        "cpus": 4
    }, {
        "id": "5",
        "name": "m1.xlarge",
        "memory": 16384,
        "disk": 160,
        "cpus": 8
    }]

    os_auth_url, os_tenant_name, os_username, os_password, \
        os_user_domain_name, os_project_domain_name = \
        CredentialHelper.get_undercloud_creds()

    kwargs = {
        'username': os_username,
        'password': os_password,
        'auth_url': os_auth_url,
        'project_id': os_tenant_name,
        'user_domain_name': os_user_domain_name,
        'project_domain_name': os_project_domain_name
    }
    n_client = nova_client.Client(2, **kwargs)

    existing_flavor_ids = []
    for existing_flavor in n_client.flavors.list(detailed=False):
        existing_flavor_ids.append(existing_flavor.id)

    for flavor in flavors:
        if flavor["id"] not in existing_flavor_ids:
            # print function call (the bare py2 print statement is a syntax
            # error under Python 3); output is unchanged.
            print('    Creating ' + flavor["name"])
            n_client.flavors.create(flavor["name"],
                                    flavor["memory"],
                                    flavor["cpus"],
                                    flavor["disk"],
                                    flavorid=flavor["id"])
        else:
            print('    Flavor ' + flavor["name"] + " already exists")