Code Example #1
def delete_compute_nodes_resources(first_controller_node_ip):
    LOG.info("Delete the compute node resources within pacemaker.")

    ssh_cmd(
        first_controller_node_ip, "heat-admin",
        "sudo pcs resource delete neutron-openvswitch-agent-compute "
        "--force")

    ssh_cmd(first_controller_node_ip, "heat-admin",
            "sudo pcs stonith delete fence-nova --force")

    ssh_cmd(first_controller_node_ip, "heat-admin",
            "sudo pcs resource delete libvirtd-compute --force")

    # Then the nova-compute resource:

    oc_auth_url, oc_tenant_name, oc_username, oc_password = \
        CredentialHelper.get_overcloud_creds()

    ssh_cmd(first_controller_node_ip, "heat-admin",
            "sudo pcs resource delete nova-compute-checkevacuate --force")

    ssh_cmd(first_controller_node_ip, "heat-admin",
            "sudo pcs resource delete nova-compute --force")
Code Example #2
    def get_inspector_client(self):
        os_auth_url, os_tenant_name, os_username, os_password, \
            os_user_domain_name, os_project_domain_name = \
            CredentialHelper.get_undercloud_creds()
        auth_url = os_auth_url + "v3"

        auth = v3.Password(
            auth_url=auth_url,
            username=os_username,
            password=os_password,
            project_name=os_tenant_name,
            user_domain_name=os_user_domain_name,
            project_domain_name=os_project_domain_name
            )
        sess = session.Session(auth=auth)
        self.inspector = ironic_inspector_client.ClientV1(session=sess)
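
This method assumes `from keystoneauth1.identity import v3`, `from keystoneauth1 import session`, and `import ironic_inspector_client` at module level. A minimal usage sketch, with a hypothetical instance and node UUID:

helper.get_inspector_client()  # `helper` is an instance of the enclosing class
status = helper.inspector.get_status("a1b2c3d4-...")  # hypothetical node UUID
print(status["finished"], status.get("error"))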
Code Example #3
File: deploy-overcloud.py  Project: uday344/JetPack
def main():
    try:
        global args
        parser = argparse.ArgumentParser()
        parser.add_argument("--controllers",
                            dest="num_controllers",
                            type=int,
                            default=3,
                            help="The number of controller nodes")
        parser.add_argument("--dell-computes",
                            dest="num_dell_computes",
                            type=int,
                            required=True,
                            help="The number of dell compute nodes")
        parser.add_argument("--storage",
                            dest="num_storage",
                            type=int,
                            required=True,
                            help="The number of storage nodes")

        parser.add_argument("--enable_hugepages",
                            action='store_true',
                            default=False,
                            help="Enable/Disable hugepages feature")
        parser.add_argument("--enable_numa",
                            action='store_true',
                            default=False,
                            help="Enable/Disable numa feature")
        parser.add_argument("--vlans",
                            dest="vlan_range",
                            required=True,
                            help="The VLAN range to use for Neutron in "
                            " xxx:yyy format")
        parser.add_argument("--nic_env_file",
                            default="5_port/nic_environment.yaml",
                            help="The NIC environment file to use")
        parser.add_argument("--ntp",
                            dest="ntp_server_fqdn",
                            default="0.centos.pool.ntp.org",
                            help="The FQDN of the ntp server to use")
        parser.add_argument("--timeout",
                            default="120",
                            help="The amount of time in minutes to allow the "
                            "overcloud to deploy")
        parser.add_argument("--overcloud_name",
                            default=None,
                            help="The name of the overcloud")
        parser.add_argument("--hugepages_size",
                            dest="hugepages_size",
                            required=False,
                            default="1GB",
                            help="HugePages size")
        parser.add_argument("--hostos_cpu_count",
                            dest="hostos_cpu_count",
                            required=False,
                            default="4",
                            help="HostOs Cpus to be configured")
        parser.add_argument("--mariadb_max_connections",
                            dest="mariadb_max_connections",
                            required=False,
                            default="15360",
                            help="Maximum number of connections for MariaDB")
        parser.add_argument("--innodb_buffer_pool_size",
                            dest="innodb_buffer_pool_size",
                            required=False,
                            default="dynamic",
                            help="InnoDB buffer pool size")
        parser.add_argument("--innodb_buffer_pool_instances",
                            dest="innodb_buffer_pool_instances",
                            required=False,
                            default="16",
                            help="InnoDB buffer pool instances.")
        parser.add_argument('--enable_dellsc',
                            action='store_true',
                            default=False,
                            help="Enable cinder Dell Storage Center backend")
        parser.add_argument('--disable_rbd',
                            action='store_true',
                            default=False,
                            help="Disable cinder Ceph and rbd backend")
        parser.add_argument('--dvr_enable',
                            action='store_true',
                            default=False,
                            help="Enables Distributed Virtual Routing")
        parser.add_argument('--static_ips',
                            action='store_true',
                            default=False,
                            help="Specify the IPs on the overcloud nodes")
        parser.add_argument('--static_vips',
                            action='store_true',
                            default=False,
                            help="Specify the VIPs for the networks")
        parser.add_argument('--ovs_dpdk',
                            action='store_true',
                            default=False,
                            help="Enable OVS+DPDK")
        parser.add_argument('--sriov',
                            action='store_true',
                            default=False,
                            help="Enable SR-IOV")
        parser.add_argument('--node_placement',
                            action='store_true',
                            default=False,
                            help="Control which physical server is assigned "
                            "which instance")
        parser.add_argument("--debug",
                            default=False,
                            action='store_true',
                            help="Indicates if the deploy-overcloud script "
                            "should be run in debug mode")
        parser.add_argument("--mtu",
                            dest="mtu",
                            type=int,
                            required=True,
                            default=1500,
                            help="Tenant Network MTU")
        LoggingHelper.add_argument(parser)
        args = parser.parse_args()
        LoggingHelper.configure_logging(args.logging_level)
        p = re.compile(r'\d+:\d+')
        if not p.match(args.vlan_range):
            raise ValueError("Error: The VLAN range must be a number followed "
                             "by a colon, followed by another number")
        os_auth_url, os_tenant_name, os_username, os_password, \
            os_user_domain_name, os_project_domain_name = \
            CredentialHelper.get_undercloud_creds()

        # Set up the default flavors
        control_flavor = "control"
        ceph_storage_flavor = "ceph-storage"
        swift_storage_flavor = "swift-storage"
        block_storage_flavor = "block-storage"

        if args.node_placement:
            validate_node_placement()

            # If node-placement is specified, then the baremetal flavor must
            # be used
            control_flavor = BAREMETAL_FLAVOR
            ceph_storage_flavor = BAREMETAL_FLAVOR
            swift_storage_flavor = BAREMETAL_FLAVOR
            block_storage_flavor = BAREMETAL_FLAVOR

        # Validate that the NIC environment file exists
        nic_env_file = os.path.join(home_dir, "pilot/templates/nic-configs",
                                    args.nic_env_file)
        if not os.path.isfile(nic_env_file):
            raise ValueError("\nError: The nic_env_file {} does not "
                             "exist!".format(nic_env_file))

        # Apply any patches required on the Director itself. This is done each
        # time the overcloud is deployed (instead of once, after the Director
        # is installed) in order to ensure an update to the Director doesn't
        # overwrite the patch.
        # logger.info("Applying patches to director...")
        # cmd = os.path.join(home_dir, 'pilot', 'patch-director.sh')
        # status = os.system(cmd)
        # if status != 0:
        #    raise ValueError("\nError: {} failed, unable to continue.  See "
        #                     "the comments in that file for additional "
        #                     "information".format(cmd))
        # Pass the parameters required by puppet which will be used
        # to enable/disable dell nfv features
        # Edit the dellnfv_environment.yaml
        # If disabled, default values will be set and
        # they won't be used for configuration
        # Create ConfigOvercloud object
        config = ConfigOvercloud(args.overcloud_name)
        # Remove this when Numa siblings added
        # Edit the dellnfv_environment.yaml
        config.edit_environment_files(
            args.mtu, args.enable_hugepages, args.enable_numa,
            args.hugepages_size, args.hostos_cpu_count, args.ovs_dpdk,
            args.sriov, nic_env_file, args.mariadb_max_connections,
            args.innodb_buffer_pool_size, args.innodb_buffer_pool_instances,
            args.num_controllers, args.num_storage, control_flavor,
            ceph_storage_flavor, swift_storage_flavor, block_storage_flavor,
            args.vlan_range, args.num_dell_computes)

        # Launch the deployment

        overcloud_name_opt = ""
        if args.overcloud_name is not None:
            overcloud_name_opt = "--stack " + args.overcloud_name

        debug = ""
        if args.debug:
            debug = "--debug"

        # The order of the environment files is important as a later inclusion
        # overrides resources defined in prior inclusions.

        # The roles_data.yaml must be included at the beginning.
        # This is needed to enable the custom role Dell Compute.
        # It overrides the default roles_data.yaml
        env_opts = "-r ~/pilot/templates/roles_data.yaml"

        # The network-environment.yaml must be included after the
        # network-isolation.yaml
        env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                    "network-isolation.yaml" \
                    " -e ~/pilot/templates/network-environment.yaml" \
                    " -e {}" \
                    " -e ~/pilot/templates/ceph-osd-config.yaml" \
                    "".format(nic_env_file)

        # The static-ip-environment.yaml must be included after the
        # network-environment.yaml
        if args.static_ips:
            env_opts += " -e ~/pilot/templates/static-ip-environment.yaml"

        # The static-vip-environment.yaml must be included after the
        # network-environment.yaml
        if args.static_vips:
            env_opts += " -e ~/pilot/templates/static-vip-environment.yaml"

        # The neutron-ovs-dvr.yaml must be included after the
        # network-environment.yaml
        if args.dvr_enable:
            env_opts += " -e ~/pilot/templates/neutron-ovs-dvr.yaml"

        if args.node_placement:
            env_opts += " -e ~/pilot/templates/node-placement.yaml"

        # The dell-environment.yaml must be included after the
        # storage-environment.yaml and ceph-radosgw.yaml
        env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                    "storage-environment.yaml" \
                    " -e ~/overcloud_images.yaml" \
                    " -e ~/pilot/templates/dell-environment.yaml" \
                    " -e ~/pilot/templates/overcloud/environments/" \
                    "puppet-pacemaker.yaml"
        host_config = False
        if args.enable_hugepages or args.enable_numa:
            env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                        "host-config-and-reboot.yaml"
            host_config = True
        if args.ovs_dpdk:
            if not args.enable_hugepages or not args.enable_numa:
                raise ValueError("Both hugepages and numa must be" +
                                 "enabled in order to use OVS-DPDK")
            else:
                env_opts += " -e ~/pilot/templates/neutron-ovs-dpdk.yaml"

        if args.sriov:
            env_opts += " -e ~/pilot/templates/neutron-sriov.yaml"
            env_opts += " -e ~/pilot/templates/ovs-hw-offload.yaml"
            if not host_config:
                env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                            "host-config-and-reboot.yaml"

        if args.enable_dellsc:
            env_opts += " -e ~/pilot/templates/dell-cinder-backends.yaml"

        cmd = "cd ;source ~/stackrc; openstack overcloud deploy" \
              " {}" \
              " --log-file ~/pilot/overcloud_deployment.log" \
              " -t {}" \
              " {}" \
              " --templates ~/pilot/templates/overcloud" \
              " -e /usr/share/openstack-tripleo-heat-templates/" \
              "environments/ceph-ansible/ceph-ansible.yaml" \
              " -e /usr/share/openstack-tripleo-heat-templates/" \
              "environments/ceph-ansible/ceph-rgw.yaml" \
              " {}" \
              " --libvirt-type kvm" \
              " --ntp-server {}" \
              "".format(debug,
                        args.timeout,
                        overcloud_name_opt,
                        env_opts,
                        args.ntp_server_fqdn,
                        )

        with open(os.path.join(home_dir, 'pilot', 'overcloud_deploy_cmd.log'),
                  'w') as f:
            f.write(cmd.replace(' -', ' \\\n -'))
            f.write('\n')
        print(cmd)
        start = time.time()
        status = run_deploy_command(cmd)
        end = time.time()
        logger.info('\nExecution time: {} (hh:mm:ss)'.format(
            time.strftime('%H:%M:%S', time.gmtime(end - start))))
        logger.info('Fetching SSH keys...')

        update_ssh_config()
        if status == 0:
            horizon_url = finalize_overcloud()
            logger.info("\nDeployment Completed")
        else:
            horizon_url = None

        logger.info('Overcloud nodes:')
        identify_nodes()

        if horizon_url:
            logger.info('\nHorizon Dashboard URL: {}\n'.format(horizon_url))
    except Exception as err:
        print(err, file=sys.stderr)
        sys.exit(1)
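
The `cmd.replace(' -', ' \\\n -')` call above writes a line-wrapped, copy-pasteable version of the deploy command to overcloud_deploy_cmd.log. A small standalone illustration, using a hypothetical command fragment:

cmd = "openstack overcloud deploy --debug -t 120 --libvirt-type kvm"
print(cmd.replace(' -', ' \\\n -'))
# openstack overcloud deploy \
#  --debug \
#  -t 120 \
#  --libvirt-type kvm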
Code Example #4
File: deploy-overcloud.py  Project: uday344/JetPack
def create_flavors():
    logger.info("Creating overcloud flavors...")

    flavors = [{
        "id": "1",
        "name": "m1.tiny",
        "memory": 512,
        "disk": 1,
        "cpus": 1
    }, {
        "id": "2",
        "name": "m1.small",
        "memory": 2048,
        "disk": 20,
        "cpus": 1
    }, {
        "id": "3",
        "name": "m1.medium",
        "memory": 4096,
        "disk": 40,
        "cpus": 2
    }, {
        "id": "4",
        "name": "m1.large",
        "memory": 8192,
        "disk": 80,
        "cpus": 4
    }, {
        "id": "5",
        "name": "m1.xlarge",
        "memory": 16384,
        "disk": 160,
        "cpus": 8
    }]

    os_auth_url, os_tenant_name, os_username, os_password, \
        os_user_domain_name, os_project_domain_name = \
        CredentialHelper.get_overcloud_creds()

    kwargs = {
        'username': os_username,
        'password': os_password,
        'auth_url': os_auth_url,
        'project_name': os_tenant_name,
        'user_domain_name': os_user_domain_name,
        'project_domain_name': os_project_domain_name
    }
    n_client = nova_client.Client(2, **kwargs)

    existing_flavor_ids = []
    for existing_flavor in n_client.flavors.list(detailed=False):
        existing_flavor_ids.append(existing_flavor.id)

    for flavor in flavors:
        if flavor["id"] not in existing_flavor_ids:
            print('    Creating ' + flavor["name"])
            n_client.flavors.create(flavor["name"],
                                    flavor["memory"],
                                    flavor["cpus"],
                                    flavor["disk"],
                                    flavorid=flavor["id"])
        else:
            print('    Flavor ' + flavor["name"] + " already exists")
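
The positional arguments to novaclient's flavors.create are (name, ram, vcpus, disk), which is why memory and cpus are passed in that order above. A quick, optional follow-up check of one of the created flavors:

# Fetch a flavor by its fixed id and print its properties.
flavor = n_client.flavors.get("1")
print(flavor.name, flavor.ram, flavor.vcpus, flavor.disk)  # m1.tiny 512 1 1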
Code Example #5
def main():
    try:
        global args
        parser = argparse.ArgumentParser()
        parser.add_argument("--controllers",
                            dest="num_controllers",
                            type=int,
                            default=3,
                            help="The number of controller nodes")
        parser.add_argument("--dell-computes",
                            dest="num_dell_computes",
                            type=int,
                            required=True,
                            help="The number of dell compute nodes")
        parser.add_argument("--dell-computeshci",
                            dest="num_dell_computeshci",
                            type=int,
                            required=True,
                            help="The number of dell hci compute nodes")
        parser.add_argument("--storage",
                            dest="num_storage",
                            type=int,
                            required=True,
                            help="The number of storage nodes")
        parser.add_argument("--powerflex",
                            dest="num_powerflex",
                            type=int,
                            required=True,
                            help="The number of powerflex storage nodes")
        parser.add_argument("--enable_hugepages",
                            action='store_true',
                            default=False,
                            help="Enable/Disable hugepages feature")
        parser.add_argument("--enable_numa",
                            action='store_true',
                            default=False,
                            help="Enable/Disable numa feature")
        parser.add_argument("--vlans",
                            dest="vlan_range",
                            required=True,
                            help="The VLAN range to use for Neutron in "
                            " xxx:yyy format")
        parser.add_argument("--nic_env_file",
                            default="5_port/nic_environment.yaml",
                            help="The NIC environment file to use")
        parser.add_argument("--ntp",
                            dest="ntp_server_fqdn",
                            default="0.centos.pool.ntp.org",
                            help="The FQDN of the ntp server to use")
        parser.add_argument("--timezone",
                            dest="time_zone",
                            default="America/Chicago",
                            help="The timezone to use")
        parser.add_argument("--timeout",
                            default="300",
                            help="The amount of time in minutes to allow the "
                            "overcloud to deploy")
        parser.add_argument("--overcloud_name",
                            default=None,
                            help="The name of the overcloud")
        parser.add_argument("--hugepages_size",
                            dest="hugepages_size",
                            required=False,
                            default="1GB",
                            help="HugePages size")
        parser.add_argument("--hostos_cpu_count",
                            dest="hostos_cpu_count",
                            required=False,
                            default="4",
                            help="HostOs Cpus to be configured")
        parser.add_argument('--enable_dellsc',
                            action='store_true',
                            default=False,
                            help="Enable cinder Dell Storage Center backend")
        parser.add_argument('--enable_unity',
                            action='store_true',
                            default=False,
                            help="Enable Dell EMC Unity backend")
        parser.add_argument('--enable_unity_manila',
                            action='store_true',
                            default=False,
                            help="Enable Dell EMC Unity Manila backend")
        parser.add_argument('--enable_powermax',
                            action='store_true',
                            default=False,
                            help="Enable Dell EMC Powermax backend")
        parser.add_argument('--powermax_protocol',
                            dest='powermax_protocol',
                            required=False,
                            default="iSCSI",
                            help="Dell EMC Powermax Protocol - iSCSI or FC")
        parser.add_argument('--enable_powermax_manila',
                            action='store_true',
                            default=False,
                            help="Enable Dell EMC PowerMax  Manila backend")
        parser.add_argument('--enable_powerstore',
                            action='store_true',
                            default=False,
                            help="Enable Dell EMC Powerstore backend")

        parser.add_argument('--disable_rbd',
                            action='store_true',
                            default=False,
                            help="Disable cinder Ceph and rbd backend")
        parser.add_argument('--octavia_enable',
                            action='store_true',
                            default=False,
                            help="Enables Octavia Load Balancer")
        parser.add_argument('--octavia_user_certs_keys',
                            action='store_true',
                            default=False,
                            help="Enables Octavia Load Balancer with "
                            "user provided certs and keys")
        parser.add_argument('--dvr_enable',
                            action='store_true',
                            default=False,
                            help="Enables Distributed Virtual Routing")
        parser.add_argument('--barbican_enable',
                            action='store_true',
                            default=False,
                            help="Enables Barbican key manager")
        parser.add_argument('--static_ips',
                            action='store_true',
                            default=False,
                            help="Specify the IPs on the overcloud nodes")
        parser.add_argument('--static_vips',
                            action='store_true',
                            default=False,
                            help="Specify the VIPs for the networks")
        parser.add_argument('--ovs_dpdk',
                            action='store_true',
                            default=False,
                            help="Enable OVS+DPDK")
        parser.add_argument('--sriov',
                            action='store_true',
                            default=False,
                            help="Enable SR-IOV")
        parser.add_argument('--hw_offload',
                            action='store_true',
                            default=False,
                            help="Enable SR-IOV Offload")
        parser.add_argument('--sriov_interfaces',
                            dest="sriov_interfaces",
                            default=False,
                            help="SR-IOV interfaces count")
        parser.add_argument('--node_placement',
                            action='store_true',
                            default=False,
                            help="Control which physical server is assigned "
                            "which instance")
        parser.add_argument("--debug",
                            default=False,
                            action='store_true',
                            help="Indicates if the deploy-overcloud script "
                            "should be run in debug mode")
        parser.add_argument("--mtu",
                            dest="mtu",
                            type=int,
                            required=True,
                            default=1500,
                            help="Tenant Network MTU")
        parser.add_argument("--dashboard_enable",
                            action='store_true',
                            default=False,
                            help="Enable the ceph dashboard deployment")
        parser.add_argument('--network_data',
                            action='store_true',
                            default=False,
                            help="Use network_data.yaml to create edge site "
                            "networks")

        LoggingHelper.add_argument(parser)
        args = parser.parse_args()
        LoggingHelper.configure_logging(args.logging_level)
        p = re.compile(r'\d+:\d+')
        if not p.match(args.vlan_range):
            raise ValueError("Error: The VLAN range must be a number followed "
                             "by a colon, followed by another number")
        os_auth_url, os_tenant_name, os_username, os_password, \
            os_user_domain_name, os_project_domain_name = \
            CredentialHelper.get_undercloud_creds()

        # Set up the default flavors
        control_flavor = "control"
        ceph_storage_flavor = "ceph-storage"
        swift_storage_flavor = "swift-storage"
        block_storage_flavor = "block-storage"

        if args.node_placement:
            validate_node_placement()

            # If node-placement is specified, then the baremetal flavor must
            # be used
            control_flavor = BAREMETAL_FLAVOR
            ceph_storage_flavor = BAREMETAL_FLAVOR
            swift_storage_flavor = BAREMETAL_FLAVOR
            block_storage_flavor = BAREMETAL_FLAVOR

        # Validate that the NIC environment file exists
        nic_env_file = os.path.join(home_dir, "pilot/templates/nic-configs",
                                    args.nic_env_file)
        if not os.path.isfile(nic_env_file):
            raise ValueError("\nError: The nic_env_file {} does not "
                             "exist!".format(nic_env_file))

        # Apply any patches required on the Director itself. This is done each
        # time the overcloud is deployed (instead of once, after the Director
        # is installed) in order to ensure an update to the Director doesn't
        # overwrite the patch.
        # logger.info("Applying patches to director...")
        # cmd = os.path.join(home_dir, 'pilot', 'patch-director.sh')
        # status = os.system(cmd)
        # if status != 0:
        #    raise ValueError("\nError: {} failed, unable to continue.  See "
        #                     "the comments in that file for additional "
        #                     "information".format(cmd))
        # Pass the parameters required by puppet which will be used
        # to enable/disable dell nfv features
        # Edit the dellnfv_environment.yaml
        # If disabled, default values will be set and
        # they won't be used for configuration
        # Create ConfigOvercloud object
        print("Configure environment file")
        config = ConfigOvercloud(args.overcloud_name)
        # Remove this when Numa siblings added
        # Edit the dellnfv_environment.yaml
        config.edit_environment_files(
            args.mtu, args.enable_hugepages, args.enable_numa,
            args.hugepages_size, args.hostos_cpu_count, args.ovs_dpdk,
            args.sriov, args.hw_offload, args.sriov_interfaces, nic_env_file,
            args.num_controllers, args.num_storage, control_flavor,
            ceph_storage_flavor, swift_storage_flavor, block_storage_flavor,
            args.vlan_range, args.time_zone, args.num_dell_computes,
            args.num_dell_computeshci, args.num_powerflex)

        # Launch the deployment
        overcloud_name_opt = ""
        if args.overcloud_name is not None:
            overcloud_name_opt = "--stack " + args.overcloud_name

        debug = ""
        if args.debug:
            debug = "--debug"

        # The order of the environment files is important as a later inclusion
        # overrides resources defined in prior inclusions.

        env_opts = ""
        # If there are edge sites we have to use network_data.yaml and
        # it must come in as the first argument.
        if args.network_data:
            env_opts += "-n ~/pilot/templates/network_data.yaml "
        # The roles_data.yaml must be included at the beginning.
        # This is needed to enable the custom role Dell Compute.
        # It overrides the default roles_data.yaml
        env_opts += "-r ~/pilot/templates/roles_data.yaml"

        # The static-ip-environment.yaml must be included after the
        # network-environment.yaml
        if args.static_ips:
            env_opts += " -e ~/pilot/templates/static-ip-environment.yaml"

        # The static-vip-environment.yaml must be included after the
        # network-environment.yaml
        if args.static_vips:
            env_opts += " -e ~/pilot/templates/static-vip-environment.yaml"

        # The configure-barbican.yaml must be included after the
        # network-environment.yaml
        if args.barbican_enable:
            env_opts += " -e ~/pilot/templates/configure-barbican.yaml"

        # The octavia.yaml must be included after the
        # network-environment.yaml
        if args.octavia_enable:
            env_opts += " -e ~/pilot/templates/octavia.yaml"
            if args.octavia_user_certs_keys is True:
                env_opts += " -e ~/pilot/templates/cert_keys.yaml"

        if args.node_placement:
            env_opts += " -e ~/pilot/templates/node-placement.yaml"

        # The neutron-ovs.yaml must be included before dell-environment.yaml
        # to enable OVS and disable OVN in OSP 16.1. If OVN is needed in the
        # future, delete this line.
        env_opts += " -e ~/pilot/templates/overcloud/environments/services/neutron-ovs.yaml"

        # The neutron-ovs-dvr.yaml must be included after the
        # neutron-ovs.yaml
        if args.dvr_enable:
            env_opts += " -e ~/pilot/templates/neutron-ovs-dvr.yaml"

        # The dell-environment.yaml must be included after the
        # storage-environment.yaml and ceph-radosgw.yaml
        if args.num_powerflex > 0:
            env_opts += " -e ~/containers-prepare-parameter.yaml" \
                        " -e ~/pilot/templates/dell-environment.yaml"
        else:
            env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                        "storage-environment.yaml" \
                        " -e ~/containers-prepare-parameter.yaml" \
                        " -e ~/pilot/templates/dell-environment.yaml"

        if args.ovs_dpdk:
            if not args.enable_hugepages or not args.enable_numa:
                raise ValueError("Both hugepages and numa must be" +
                                 "enabled in order to use OVS-DPDK")
            else:
                env_opts += " -e ~/pilot/templates/neutron-ovs-dpdk.yaml"

        if args.sriov:
            if not args.enable_numa:
                raise ValueError("Numa cpu pinning must be " +
                                 "enabled in order to use SRIOV")
            else:
                env_opts += " -e ~/pilot/templates/neutron-sriov.yaml"

        if args.enable_dellsc:
            env_opts += " -e ~/pilot/templates/dellsc-cinder-config.yaml"

        if args.enable_unity:
            env_opts += " -e ~/pilot/templates/dellemc-unity-cinder-" \
                        "container.yaml"
            env_opts += " -e ~/pilot/templates/dellemc-unity-cinder-" \
                        "backend.yaml"

        if args.enable_unity_manila:
            env_opts += " -e ~/pilot/templates/unity-manila-container.yaml"
            env_opts += " -e ~/pilot/templates/unity-manila-config.yaml"

        if args.enable_powermax:
            if args.powermax_protocol == "iSCSI":
                env_opts += " -e ~/pilot/templates/dellemc-powermax-iscsi-cinder-" \
                         "backend.yaml"
            else:
                env_opts += " -e ~/pilot/templates/dellemc-powermax-fc-cinder-" \
                         "backend.yaml"
        if args.enable_powermax_manila:
            env_opts += " -e ~/pilot/templates/powermax-manila-config.yaml"

        if args.enable_powerstore:
            env_opts += " -e ~/pilot/templates/dellemc-powerstore-cinder-backend.yaml"

        if args.num_powerflex > 0:
            env_opts += " -e ~/pilot/templates/overcloud/environments/powerflex-ansible/powerflex-ansible.yaml"
            env_opts += " -e ~/pilot/templates/dellemc-powerflex-cinder-backend.yaml"
            env_opts += " -e ~/pilot/templates/custom-dellemc-volume-mappings.yaml"
        else:
            env_opts += " -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-ansible.yaml" \
                        " -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-rgw.yaml"

        if args.dashboard_enable:
            env_opts += " -e /usr/share/openstack-tripleo-heat-templates/environments/ceph-ansible/ceph-dashboard.yaml"
            env_opts += " -e ~/pilot/templates/ceph_dashboard_admin.yaml"

        # The network-environment.yaml must be included after other templates
        # for effective parameter overrides (External vlan default route)
        # The network-environment.yaml must be included after the network-isolation.yaml
        env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                    "network-isolation.yaml" \
                    " -e ~/pilot/templates/network-environment.yaml" \
                    " -e {} " \
                    "-e ~/pilot/templates/site-name.yaml".format(nic_env_file)

        cmd = "cd ;source ~/stackrc; openstack overcloud deploy" \
              " {}" \
              " --log-file ~/pilot/overcloud_deployment.log" \
              " -t {}" \
              " {}" \
              " --templates ~/pilot/templates/overcloud" \
              " {}" \
              " --libvirt-type kvm" \
              " --no-cleanup" \
              " --ntp-server {}" \
              "".format(debug,
                        args.timeout,
                        overcloud_name_opt,
                        env_opts,
                        args.ntp_server_fqdn,
                        )
        with open(os.path.join(home_dir, 'pilot', 'overcloud_deploy_cmd.log'),
                  'w') as f:
            f.write(cmd.replace(' -', ' \\\n -'))
            f.write('\n')
        start = time.time()
        status = run_deploy_command(cmd)
        end = time.time()
        logger.info('\nExecution time: {} (hh:mm:ss)'.format(
            time.strftime('%H:%M:%S', time.gmtime(end - start))))
        logger.info('Fetching SSH keys...')

        update_ssh_config()
        if status == 0:
            horizon_url = finalize_overcloud()
            logger.info("\nDeployment Completed")
        else:
            horizon_url = None

        logger.info('Overcloud nodes:')
        identify_nodes()

        if horizon_url:
            logger.info('\nHorizon Dashboard URL: {}\n'.format(horizon_url))
    except Exception as err:
        print(err, file=sys.stderr)
        sys.exit(1)
Code Example #6
File: deploy-overcloud.py  Project: kholohan/JetPack
def main():
    try:
        global args
        parser = argparse.ArgumentParser()
        parser.add_argument("--controllers",
                            dest="num_controllers",
                            type=int,
                            default=3,
                            help="The number of controller nodes")
        parser.add_argument("--computes",
                            dest="num_computes",
                            type=int,
                            required=True,
                            help="The number of compute nodes")
        parser.add_argument("--storage",
                            dest="num_storage",
                            type=int,
                            required=True,
                            help="The number of storage nodes")
        parser.add_argument("--vlans",
                            dest="vlan_range",
                            required=True,
                            help="The VLAN range to use for Neutron in "
                            " xxx:yyy format")
        parser.add_argument("--ntp",
                            dest="ntp_server_fqdn",
                            default="0.centos.pool.ntp.org",
                            help="The FQDN of the ntp server to use")
        parser.add_argument("--timeout",
                            default="120",
                            help="The amount of time in minutes to allow the "
                            "overcloud to deploy")
        parser.add_argument("--overcloud_name",
                            default=None,
                            help="The name of the overcloud")
        parser.add_argument('--enable_dellsc',
                            action='store_true',
                            default=False,
                            help="Enable cinder Dell Storage Center backend")
        parser.add_argument('--disable_rbd',
                            action='store_true',
                            default=False,
                            help="Disable cinder Ceph and rbd backend")
        parser.add_argument('--static_ips',
                            action='store_true',
                            default=False,
                            help="Specify the IPs on the overcloud nodes")
        parser.add_argument('--static_vips',
                            action='store_true',
                            default=False,
                            help="Specify the VIPs for the networks")
        parser.add_argument('--node_placement',
                            action='store_true',
                            default=False,
                            help="Control which physical server is assigned "
                            "which instance")
        parser.add_argument("--debug",
                            default=False,
                            action='store_true',
                            help="Indicates if the deploy-overcloud script "
                            "should be run in debug mode")
        args = parser.parse_args()
        p = re.compile(r'\d+:\d+')
        if not p.match(args.vlan_range):
            raise ValueError("Error: The VLAN range must be a number followed "
                             "by a colon, followed by another number")

        os_auth_url, os_tenant_name, os_username, os_password = \
            CredentialHelper.get_undercloud_creds()

        # Set up the default flavors
        control_flavor = "control"
        compute_flavor = "compute"
        ceph_storage_flavor = "ceph-storage"
        swift_storage_flavor = "swift-storage"
        block_storage_flavor = "block-storage"

        if args.node_placement:
            validate_node_placement()

            # If node-placement is specified, then the baremetal flavor must
            # be used
            control_flavor = BAREMETAL_FLAVOR
            compute_flavor = BAREMETAL_FLAVOR
            ceph_storage_flavor = BAREMETAL_FLAVOR
            swift_storage_flavor = BAREMETAL_FLAVOR
            block_storage_flavor = BAREMETAL_FLAVOR

        # Apply any patches required on the Director itself. This is done each
        # time the overcloud is deployed (instead of once, after the Director
        # is installed) in order to ensure an update to the Director doesn't
        # overwrite the patch.
        print('Applying patches to director...')
        cmd = os.path.join(home_dir, 'pilot', 'patch-director.sh')
        status = os.system(cmd)
        if status != 0:
            raise ValueError("\nError: {} failed, unable to continue.  See "
                             "the comments in that file for additional "
                             "information".format(cmd))

        # Launch the deployment

        overcloud_name_opt = ""
        if args.overcloud_name is not None:
            overcloud_name_opt = "--stack " + args.overcloud_name

        debug = ""
        if args.debug:
            debug = "--debug"

        # The order of the environment files is important as a later inclusion
        # overrides resources defined in prior inclusions.

        # The network-environment.yaml must be included after the
        # network-isolation.yaml
        env_opts = "-e ~/pilot/templates/overcloud/environments/" \
                   "network-isolation.yaml" \
                   " -e ~/pilot/templates/network-environment.yaml" \
                   " -e ~/pilot/templates/ceph-osd-config.yaml"

        # The static-ip-environment.yaml must be included after the
        # network-environment.yaml
        if args.static_ips:
            env_opts += " -e ~/pilot/templates/static-ip-environment.yaml"

        # The static-vip-environment.yaml must be included after the
        # network-environment.yaml
        if args.static_vips:
            env_opts += " -e ~/pilot/templates/static-vip-environment.yaml"

        if args.node_placement:
            env_opts += " -e ~/pilot/templates/node-placement.yaml"

        # The dell-environment.yaml must be included after the
        # storage-environment.yaml and ceph-radosgw.yaml
        env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                    "storage-environment.yaml" \
                    " -e ~/pilot/templates/overcloud/environments/" \
                    "ceph-radosgw.yaml" \
                    " -e ~/pilot/templates/dell-environment.yaml" \
                    " -e ~/pilot/templates/overcloud/environments/" \
                    "puppet-pacemaker.yaml"

        if args.enable_dellsc:
            env_opts += " -e ~/pilot/templates/dell-cinder-backends.yaml"

        cmd = "cd ; openstack overcloud deploy" \
              " {}" \
              " --log-file ~/pilot/overcloud_deployment.log" \
              " -t {}" \
              " {}" \
              " --templates ~/pilot/templates/overcloud" \
              " {}" \
              " --control-flavor {}" \
              " --compute-flavor {}" \
              " --ceph-storage-flavor {}" \
              " --swift-storage-flavor {}" \
              " --block-storage-flavor {}" \
              " --neutron-public-interface bond1" \
              " --neutron-network-type vlan" \
              " --neutron-disable-tunneling" \
              " --libvirt-type kvm" \
              " --os-auth-url {}" \
              " --os-project-name {}" \
              " --os-user-id {}" \
              " --os-password {}" \
              " --control-scale {}" \
              " --compute-scale {}" \
              " --ceph-storage-scale {}" \
              " --ntp-server {}" \
              " --neutron-network-vlan-ranges physint:{},physext" \
              " --neutron-bridge-mappings physint:br-tenant,physext:br-ex" \
              "".format(debug,
                        args.timeout,
                        overcloud_name_opt,
                        env_opts,
                        control_flavor,
                        compute_flavor,
                        ceph_storage_flavor,
                        swift_storage_flavor,
                        block_storage_flavor,
                        os_auth_url,
                        os_tenant_name,
                        os_username,
                        os_password,
                        args.num_controllers,
                        args.num_computes,
                        args.num_storage,
                        args.ntp_server_fqdn,
                        args.vlan_range)

        with open(os.path.join(home_dir, 'pilot', 'overcloud_deploy_cmd.log'),
                  'w') as f:
            f.write(cmd.replace(' -', ' \\\n -'))
            f.write('\n')
        print(cmd)
        start = time.time()
        status = run_deploy_command(cmd)
        end = time.time()
        print('\nExecution time: {} (hh:mm:ss)'.format(
            time.strftime('%H:%M:%S', time.gmtime(end - start))))
        print('Fetching SSH keys...')
        update_ssh_config()
        if status == 0:
            horizon_url = finalize_overcloud()
        else:
            horizon_url = None
        print('Overcloud nodes:')
        identify_nodes()

        if horizon_url:
            print('\nHorizon Dashboard URL: {}\n'.format(horizon_url))
    except ValueError as err:
        print(err, file=sys.stderr)
        sys.exit(1)
Code Example #7
def config_idrac(instack_lock,
                 ip_service_tag,
                 node_definition=Constants.INSTACKENV_FILENAME,
                 model_properties=Utils.get_model_properties(
                     Constants.MODEL_PROPERTIES_FILENAME),
                 pxe_nic=None,
                 password=None,
                 skip_nic_config=False):
    node = CredentialHelper.get_node_from_instack(ip_service_tag,
                                                  node_definition)
    if not node:
        raise ValueError("Unable to find {} in {}".format(
            ip_service_tag, node_definition))
    drac_ip = node["pm_addr"]
    drac_user = node["pm_user"]
    drac_password = node["pm_password"]
    ironic_driver = node["pm_type"]

    if ironic_driver != "idrac":
        LOG.info("{} is using the {} driver.  No iDRAC configuration is "
                 "possible.".format(ip_service_tag, ironic_driver))

        if pxe_nic:
            LOG.warning("Ignoring specified PXE NIC ({})".format(pxe_nic))

        if password:
            LOG.warning("Ignoring specified password")

        return

    drac_client = DRACClient(drac_ip, drac_user, drac_password)

    reset_idrac(drac_client, ip_service_tag)

    # Clear out any pending jobs in the job queue and fix the condition where
    # there are no pending jobs, but the iDRAC thinks there are
    clear_job_queue(drac_client, ip_service_tag)
    if skip_nic_config:
        target_boot_mode = BootModeHelper.get_boot_mode(drac_client)
    elif BootModeHelper.is_boot_order_flexibly_programmable(drac_client):
        target_boot_mode = boot_mode_helper.DRAC_BOOT_MODE_UEFI
    else:
        target_boot_mode = boot_mode_helper.DRAC_BOOT_MODE_BIOS
    config_boot_mode(drac_client, ip_service_tag, node, target_boot_mode)

    job_ids = list()
    reboot_required = False

    pxe_nic_fqdd = get_pxe_nic_fqdd(pxe_nic, model_properties, drac_client)

    if skip_nic_config:
        provisioning_mac = get_nic_mac_address(drac_client, pxe_nic_fqdd)
        LOG.info("Skipping NIC configuration")
    else:
        # Configure the NIC port to PXE boot or not
        reboot_required_nic, nic_job_ids, provisioning_mac = \
            configure_nics_boot_settings(drac_client,
                                         ip_service_tag,
                                         pxe_nic_fqdd,
                                         node,
                                         target_boot_mode)

        reboot_required = reboot_required or reboot_required_nic
        if nic_job_ids:
            job_ids.extend(nic_job_ids)

    # Do initial idrac configuration
    reboot_required_idrac, idrac_job_id = config_idrac_settings(
        drac_client, ip_service_tag, password, node)
    reboot_required = reboot_required or reboot_required_idrac
    if idrac_job_id:
        job_ids.append(idrac_job_id)

    # If we need to reboot, then add a job for it
    if reboot_required:
        LOG.info("Rebooting {} to apply configuration".format(ip_service_tag))

        job_id = drac_client.create_reboot_job()
        job_ids.append(job_id)

    success = True
    if job_ids:
        drac_client.schedule_job_execution(job_ids, start_time='TIME_NOW')

        LOG.info("Waiting for iDRAC configuration to complete on {}".format(
            ip_service_tag))
        LOG.info("Do not unplug {}".format(ip_service_tag))

        # If the user set the password, then we need to change creds
        if password:
            new_drac_client = DRACClient(drac_ip, drac_user, password)

            # Try every 10 seconds over 2 minutes to connect with the new creds
            password_changed = False
            retries = 12
            while not password_changed and retries > 0:
                try:
                    LOG.debug("Attempting to access the iDRAC on {} with the "
                              "new password".format(ip_service_tag))
                    new_drac_client.is_idrac_ready()
                    password_changed = True
                except exceptions.WSManInvalidResponse as ex:
                    if "unauthorized" in str(ex).lower():
                        LOG.debug(
                            "Got an unauthorized exception on {}, so "
                            "sleeping and trying again".format(ip_service_tag))
                        retries -= 1
                        if retries > 0:
                            sleep(10)
                    else:
                        raise

            # If the new creds were successful then use them.  If they were not
            # successful then assume the attempt to change the password failed
            # and stick with the original creds
            if password_changed:
                LOG.debug(
                    "Successfully changed the password on {}.  "
                    "Switching to the new password".format(ip_service_tag))
                drac_client = new_drac_client
            else:
                success = False
                LOG.warning("Failed to change the password on {}".format(
                    ip_service_tag))

        all_jobs_succeeded = wait_for_jobs_to_complete(job_ids, drac_client,
                                                       ip_service_tag)

        if not all_jobs_succeeded:
            success = False

    if success and target_boot_mode == boot_mode_helper.DRAC_BOOT_MODE_BIOS:
        success = config_hard_disk_drive_boot_sequence(drac_client,
                                                       ip_service_tag)

    # We always want to update the password for the node in the instack file
    # if the user requested a password change and the iDRAC config job was
    # successful regardless of if the other jobs succeeded or not.
    new_password = None
    if password:
        job_status = drac_client.get_job(idrac_job_id).status

        if JobHelper.job_succeeded(job_status):
            new_password = password

    if new_password is not None or \
        "provisioning_mac" not in node or \
        ("provisioning_mac" in node
         and node["provisioning_mac"] != provisioning_mac):

        # Synchronize to prevent thread collisions while saving the instack
        # file
        if instack_lock is not None:
            LOG.debug("Acquiring the lock")
            instack_lock.acquire()
        try:
            if instack_lock is not None:
                LOG.debug("Clearing and reloading instack")
                # Force a reload of the instack file
                CredentialHelper.clear_instack_cache()
                node = CredentialHelper.get_node_from_instack(
                    ip_service_tag, node_definition)
            if new_password is not None:
                node["pm_password"] = new_password

            node["provisioning_mac"] = provisioning_mac

            LOG.debug("Saving instack")
            CredentialHelper.save_instack(node_definition)
        finally:
            if instack_lock is not None:
                LOG.debug("Releasing the lock")
                instack_lock.release()

    if success:
        LOG.info("Completed configuration of the iDRAC on {}".format(
            ip_service_tag))
    else:
        raise RuntimeError("An error occurred while configuring the iDRAC "
                           "on {}".format(drac_ip))
Code Example #8
class ConfigOvercloud(object):
    """
    Description: Class responsible for overcloud configurations.
    """
    ironic = IronicHelper()
    ironic_client = ironic.get_ironic_client()
    nodes = ironic_client.node.list()
    get_drac_credential = CredentialHelper()

    def __init__(self, overcloud_name):
        self.overcloud_name = overcloud_name
        self.overcloudrc = "source " + home_dir + "/"\
            + self.overcloud_name + "rc;"

    @classmethod
    def get_minimum_memory_size(cls, node_type):
        try:
            memory_size = []
            for node in ConfigOvercloud.nodes:
                node_uuid = node.uuid
                # Get the details of a node
                node_details = ConfigOvercloud.ironic_client.node.get(
                    node_uuid)
                # Get the memory count or size
                memory_count = node_details.properties['memory_mb']
                # Get the type details of the node
                node_properties_capabilities = node_details.properties[
                    'capabilities'].split(',')[0].split(':')[1]
                if node_type in node_properties_capabilities:
                    memory_size.append(memory_count)
            return min(memory_size)
        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            raise Exception("Failed to get memory size {}".format(message))

    @classmethod
    def calculate_hostos_cpus(cls, number_of_host_os_cpu):
        try:
            global HOST_OS_CPUS, VCPUS, TOTAL_CPUS
            cpu_count_list = []
            for node in ConfigOvercloud.nodes:
                # for every compute node get the corresponding drac credentials
                # to fetch the cpu details
                node_uuid = node.uuid
                node_details = ConfigOvercloud.ironic_client.node.get(
                    node_uuid)
                node_type = node_details.properties['capabilities'].split(
                    ',')[0].split(':')[1]
                if 'compute' not in node_type:
                    # filter for getting compute node
                    continue
                drac_ip, drac_user, drac_password = \
                    ConfigOvercloud.get_drac_credential.get_drac_creds(
                        ConfigOvercloud.ironic_client, node_uuid)
                stor = client.DRACClient(drac_ip, drac_user, drac_password)
                # cpu socket information for every compute node
                sockets = stor.list_cpus()
                cpu_count = 0
                for socket in sockets:
                    if socket.ht_enabled:
                        cpu_count += socket.cores * 2
                    else:
                        raise Exception("Hyperthreading is not enabled in " +
                                        str(node_uuid))
                cpu_count_list.append(cpu_count)

            min_cpu_count = min(cpu_count_list)
            if min_cpu_count not in [40, 48, 56, 64, 72, 128]:
                raise Exception("The number of vCPUs, as specified in the"
                                " reference architecture, must be one of"
                                " [40, 48, 56, 64, 72, 128]"
                                " but number of vCPUs are " +
                                str(min_cpu_count))
            number_of_host_os_cpu = int(number_of_host_os_cpu)
            logger.info("host_os_cpus {}".format(
                cpu_siblings.sibling_info[min_cpu_count][number_of_host_os_cpu]
                ["host_os_cpu"]))
            logger.info(
                "vcpus {}".format(cpu_siblings.sibling_info[min_cpu_count]
                                  [number_of_host_os_cpu]["vcpu_pin_set"]))
            siblings_info = cpu_siblings.sibling_info[min_cpu_count][
                number_of_host_os_cpu]
            HOST_OS_CPUS = siblings_info["host_os_cpu"]
            VCPUS = siblings_info["vcpu_pin_set"]
            TOTAL_CPUS = min_cpu_count
        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            raise Exception("Failed to calculate "
                            "Numa Vcpu list {}".format(message))

    @classmethod
    def calculate_hugepage_count(cls, hugepage_size):
        try:
            memory_count = ConfigOvercloud.get_minimum_memory_size("compute")
            # RAM size should be more than 128G
            if memory_count < 128000:
                raise Exception("RAM size is less than 128GB"
                                "make sure to have all prerequisites")
            # Subtracting
            # 16384MB = (Host Memory 12GB + Kernel Memory 4GB)
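            # Worked example (illustrative): a 256 GB node reports
            # memory_count = 262144, so 262144 - 16384 = 245760 MB remain,
            # i.e. 122880 2MB pages or 240 1GB pages.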
            memory_count = (memory_count - 16384)
            if hugepage_size == "2MB":
                hugepage_count = (memory_count / 2)
            if hugepage_size == "1GB":
                hugepage_count = (memory_count / 1024)
            logger.info("hugepage_size {}".format(hugepage_size))
            logger.info("hugepage_count {}".format(hugepage_count))
            return hugepage_count
        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            raise Exception("Failed to calculate"
                            " hugepage count {}".format(message))

    def edit_environment_files(self,
                               enable_hugepage,
                               enable_numa,
                               hugepage_size,
                               hostos_cpu_count,
                               ovs_dpdk,
                               nic_env_file,
                               mariadb_max_connections,
                               innodb_buffer_pool_size,
                               innodb_buffer_pool_instances,
                               controller_count,
                               ceph_storage_count,
                               controller_flavor,
                               ceph_storage_flavor,
                               swift_storage_flavor,
                               block_storage_flavor,
                               vlan_range,
                               dell_compute_count=0):
        try:
            logger.info("Editing dell environment file")
            file_path = home_dir + '/pilot/templates/dell-environment.yaml'
            dpdk_file = home_dir + '/pilot/templates/neutron-ovs-dpdk.yaml'
            cmds = []
            if not os.path.isfile(file_path):
                raise Exception(
                    "The dell-environment.yaml file does not exist")
            if not os.path.isfile(dpdk_file):
                raise Exception(
                    "The neutron-ovs-dpdk.yaml file does not exist")
            if not ovs_dpdk:
                cmds.append('sed -i "s|  # NovaSchedulerDefaultFilters|  ' +
                            'NovaSchedulerDefaultFilters|" ' + file_path)
            cmds.extend((
                'sed -i "s|DellComputeCount:.*|DellComputeCount: ' +
                str(dell_compute_count) + '|" ' + file_path,
                'sed -i "s|ControllerCount:.*|ControllerCount: ' +
                str(controller_count) + '|" ' + file_path,
                'sed -i "s|CephStorageCount:.*|CephStorageCount: ' +
                str(ceph_storage_count) + '|" ' + file_path,
                'sed -i "s|OvercloudControllerFlavor:.*|OvercloudControllerFlavor: '
                + str(controller_flavor) + '|" ' + file_path,
                'sed -i "s|OvercloudCephStorageFlavor:.*|OvercloudCephStorageFlavor: '
                + str(ceph_storage_flavor) + '|" ' + file_path,
                'sed -i "s|OvercloudSwiftStorageFlavor:.*|OvercloudSwiftStorageFlavor: '
                + str(swift_storage_flavor) + '|" ' + file_path,
                'sed -i "s|OvercloudBlockStorageFlavor:.*|OvercloudBlockStorageFlavor: '
                + str(block_storage_flavor) + '|" ' + file_path,
                'sed -i "s|NeutronNetworkVLANRanges:.*|NeutronNetworkVLANRanges: '
                + 'physint:' + str(vlan_range) + ',physext'
                '|" ' + file_path))

            # Initialize hugecmd so the ovs_dpdk branch below cannot hit a
            # NameError when hugepages are disabled.
            hugecmd = ''
            if enable_hugepage:
                hpg_num = ConfigOvercloud.calculate_hugepage_count(
                    hugepage_size)
                hugecmd = 'default_hugepagesz=' + \
                    hugepage_size + ' hugepagesz=' + \
                    hugepage_size[0:-1]+' hugepages=' + \
                    str(hpg_num)+' iommu=pt intel_iommu=on'
                if not ovs_dpdk:
                    cmds.append('sed -i "s|HugepagesEnable.*|' +
                                'HugepagesEnable: true|" ' + file_path)
                    cmds.append("sed -i 's|HugePages:.*|HugePages: \"" +
                                hugecmd + "\"|' " + file_path)

            if enable_numa:
                ConfigOvercloud.calculate_hostos_cpus(hostos_cpu_count)
                if not ovs_dpdk:
                    cmds.append('sed -i "s|NumaEnable:.*|NumaEnable: true|" ' +
                                file_path)
                    cmds.append("sed -i 's|NumaCpus:.*|NumaCpus: " + VCPUS +
                                "|' " + file_path)
                    cmds.append('sed -i "s|  # NovaVcpuPinSet|  ' +
                                'NovaVcpuPinSet|" ' + file_path)
                    cmds.append(
                        "sed -i 's|NovaVcpuPinSet:.*|NovaVcpuPinSet: \"" +
                        VCPUS + "\"|' " + file_path)
            if ovs_dpdk:
                for each in re.split(r'[_/]', nic_env_file):
                    if each.find('mode') != -1:
                        ovs_dpdk_mode = each[-1:]
                siblings_info = cpu_siblings.sibling_info[TOTAL_CPUS][int(
                    hostos_cpu_count)]
                if ovs_dpdk_mode == '1':
                    pmd_cores = siblings_info["mode1_pmd_cores"]
                    pmd_rem_cores = siblings_info["mode1_rem_cores"]
                else:
                    pmd_cores = siblings_info["mode2_pmd_cores"]
                    pmd_rem_cores = siblings_info["mode2_rem_cores"]
                cmds.append(
                    'sed -i "s|NeutronDpdkCoreList:.*|NeutronDpdkCoreList: \\"'
                    + pmd_cores.join(["'", "'"]) + '\\" |" ' + dpdk_file)
                cmds.append('sed -i "s|PmdRemCores:.*|PmdRemCores: "' +
                            pmd_rem_cores + '"|" ' + dpdk_file)
                cmds.append("sed -i 's|HugePages:.*|HugePages: \"" + hugecmd +
                            "\"|' " + dpdk_file)
                cmds += [
                    'sed -i "s|HostOsCpus:.*|HostOsCpus: "' + HOST_OS_CPUS +
                    '"|" ' + dpdk_file,
                    'sed -i "s|VcpuPinSet:.*|VcpuPinSet: "' + VCPUS + '"|" ' +
                    dpdk_file,
                ]

            # Performance and Optimization
            if innodb_buffer_pool_size != "dynamic":
                BufferPoolSize = int(innodb_buffer_pool_size.replace(
                    "G", "")) * 1024
                memory_mb = ConfigOvercloud.get_minimum_memory_size("control")
                if memory_mb < BufferPoolSize:
                    raise Exception("innodb_buffer_pool_size is greater than"
                                    " available memory size")
            cmds.append(
                'sed -i "s|MysqlMaxConnections.*|MysqlMaxConnections: ' +
                mariadb_max_connections + '|" ' + file_path)
            if ovs_dpdk:
                f_path = dpdk_file
            else:
                f_path = file_path
            cmds.append('sed -i "s|BufferPoolSize.*|BufferPoolSize: ' +
                        innodb_buffer_pool_size + '|" ' + f_path)
            cmds.append(
                'sed -i "s|BufferPoolInstances.*|BufferPoolInstances: ' +
                innodb_buffer_pool_instances + '|" ' + f_path)

            for cmd in cmds:
                status = os.system(cmd)
                if status != 0:
                    raise Exception("Failed to execute the command {}"
                                    " with error code {}".format(cmd, status))
                logger.debug("cmd: {}".format(cmd))

        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            logger.error(message)
            raise Exception("Failed to modify the dell_environment.yaml"
                            " at location {}".format(file_path))

    def get_dell_compute_nodes_hostnames(self, nova):
        try:
            logger.info("Getting dellnfv compute node hostnames")
            # Create host object
            host_obj = hosts.HostManager(nova)

            # Get list of dell nfv nodes
            dell_hosts = []

            for host in host_obj.list():
                if "dell-compute" in host.host_name:
                    hostname = str(host.host_name)
                    dell_hosts.append(hostname)
            return dell_hosts
        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            logger.error(message)
            raise Exception("Failed to get the Dell Compute nodes.")

    def edit_aggregate_environment_file(self, hostname_list):
        logger.info("Editing create aggregate environment file")
        file_path = home_dir \
            + '/pilot/templates/create_aggregate_environment.yaml'
        if not os.path.isfile(file_path):
            raise Exception(
                "The create_aggregate_environment.yaml file does not exist")
        cmd = ('sed -i "s|hosts:.*|hosts: ' + str(hostname_list) + '|" ' +
               file_path)

        status = os.system(cmd)
        logger.info("cmd: {}".format(cmd))
        if status != 0:
            raise Exception("Failed to execute the command {}"
                            " with error code {}".format(cmd, status))

    def create_aggregate(self):
        UC_AUTH_URL, UC_PROJECT_ID, UC_USERNAME, UC_PASSWORD = \
            CredentialHelper.get_overcloud_creds()
        # Create nova client object
        nova = nvclient.Client(2, UC_USERNAME, UC_PASSWORD, UC_PROJECT_ID,
                               UC_AUTH_URL)
        hostname_list = self.get_dell_compute_nodes_hostnames(nova)
        self.edit_aggregate_environment_file(hostname_list)
        env_opts = \
            " -e ~/pilot/templates/create_aggregate_environment.yaml"

        cmd = self.overcloudrc + "openstack stack create " \
            " Dell_Aggregate" \
            " --template" \
            " ~/pilot/templates/createaggregate.yaml" \
            " {}" \
            "".format(env_opts)
        aggregate_create_status = os.system(cmd)
        if aggregate_create_status == 0:
            logger.info("Dell_Aggregate created")
        else:
            raise Exception("Aggregate {} could not be created..."
                            " Exiting post deployment tasks")

    def post_deployment_tasks(self):
        try:
            logger.info("Initiating post deployment tasks")
            # create aggregate
            self.create_aggregate()
        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            raise Exception(message)
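The hugepage arithmetic in the example above reduces to a few lines. Below is a minimal standalone sketch of the same calculation (the helper name and the 256 GB example are illustrative, not JetPack code):

def build_hugepage_kernel_args(memory_mb, hugepage_size="1GB"):
    # Reserve 16384 MB (12 GB host + 4 GB kernel) before carving out
    # hugepages, mirroring calculate_hugepage_count() above.
    usable_mb = memory_mb - 16384
    page_mb = 2 if hugepage_size == "2MB" else 1024
    count = usable_mb // page_mb
    return ("default_hugepagesz={0} hugepagesz={1} hugepages={2}"
            " iommu=pt intel_iommu=on").format(
                hugepage_size, hugepage_size[:-1], count)

# build_hugepage_kernel_args(262144) ->
# "default_hugepagesz=1GB hugepagesz=1G hugepages=240 iommu=pt intel_iommu=on"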
Code example #9
File: dell_nfv_edge.py  Project: usama2490/JetPack
class ConfigEdge(ConfigOvercloud):
    """
    Description: Class responsible for edge site overcloud configurations.
    """
    ironic = IronicHelper()
    ironic_client = ironic.get_ironic_client()
    nodes = ironic_client.node.list()
    get_drac_credential = CredentialHelper()

    def __init__(self, overcloud_name, node_type, node_type_data):
        self.node_type = node_type
        self.node_type_data = json.loads(node_type_data)
        self.mtu = int(self.node_type_data["nfv_mtu"])
        _dir = (re.sub(r'[^a-z0-9]', " ", node_type.lower()).replace(" ", "_"))
        _ntl = re.sub(r'[^a-z0-9]', "", node_type.lower())
        ne_name = "nic_environment_{}.yaml".format(_ntl)
        instack_name = "instackenv_{}.json".format(_ntl)
        nic_env_file = os.path.join(home_dir, _dir, ne_name)
        instackenv_file = os.path.join(home_dir, _dir, instack_name)
        self.instackenv = instackenv_file
        self.nic_env = nic_env_file
        super().__init__(overcloud_name)

    def fetch_nfv_parameters(self):
        logger.debug("Retrieving NFV parameters")
        ntd = self.node_type_data
        enable_hugepage = Utils.string_to_bool(ntd["hpg_enable"])
        enable_numa = Utils.string_to_bool(ntd["numa_enable"])
        nfv_type = self._get_nfv_type(ntd)
        is_ovs_dpdk = bool(nfv_type and nfv_type in ["dpdk", "both"])
        hostos_cpu_count = int(ntd["numa_hostos_cpu_count"])
        _dir = (re.sub(r'[^a-z0-9]', " ",
                       self.node_type.lower()).replace(" ", "_"))
        ntl = re.sub(r'[^a-z0-9]', "", self.node_type.lower())
        _f_name = "nic_environment_{}.yaml".format(ntl)
        nic_env_file = os.path.join(home_dir, _dir, _f_name)
        params = {}
        params_dell_env = params["dell_env"] = {}
        kernel_args = "iommu=pt intel_iommu=on"

        if enable_hugepage:
            hpg_num = self.nfv_params.calculate_hugepage_count(ntd["hpg_size"])
            kernel_args += (" default_hugepagesz={} hugepagesz={}"
                            " hugepages={}").format(ntd["hpg_size"],
                                                    ntd["hpg_size"][0:-1],
                                                    str(hpg_num))
        if enable_numa:
            _, node_data = self.nfv_params.select_compute_node(
                self.node_type, self.instackenv)
            self.nfv_params.parse_data(node_data)
            self.nfv_params.get_all_cpus()
            self.nfv_params.get_host_cpus(hostos_cpu_count)
            self.nfv_params.get_nova_cpus()
            self.nfv_params.get_isol_cpus()
            if is_ovs_dpdk:
                dpdk_nics = self.find_ifaces_by_keyword(nic_env_file, 'Dpdk')
                logger.debug("DPDK-NICs >>" + str(dpdk_nics))
                self.nfv_params.get_pmd_cpus(self.mtu, dpdk_nics)
                self.nfv_params.get_socket_memory(self.mtu, dpdk_nics)
            kernel_args += " isolcpus={}".format(self.nfv_params.isol_cpus)
            # dell-environment role-specific CPU parameters
            params_dell_env["IsolCpusList"] = self.nfv_params.isol_cpus
            params_dell_env[
                "NovaComputeCpuDedicatedSet"] = self.nfv_params.nova_cpus
        if is_ovs_dpdk:
            params_dpdk = params["dpdk"] = {}
            params_dpdk["OvsDpdkCoreList"] = self.nfv_params.host_cpus
            params_dpdk["OvsPmdCoreList"] = self.nfv_params.pmd_cpus
            params_dpdk["OvsDpdkSocketMemory"] = self.nfv_params.socket_mem
            # params_dpdk["IsolCpusList"] = self.nfv_params.isol_cpus # Populated in dell_env file
            # params_dpdk["NovaComputeCpuDedicatedSet"] = self.nfv_params.nova_cpus # Populated in dell_env file
            # params_dpdk["NovaComputeCpuSharedSet"] = self.nfv_params.shared_cpus # Not used in current Architecture

        params_dell_env["KernelArgs"] = kernel_args
        return params

    def _get_nfv_type(self, node_type_data):
        if ("nfv_type" in node_type_data
                and len(node_type_data["nfv_type"].strip()) != 0
                and node_type_data["nfv_type"].strip()
                in ("dpdk", "sriov", "both")):
            return node_type_data["nfv_type"].strip()
        return None
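fetch_nfv_parameters() returns plain dicts keyed by target template ("dell_env", "dpdk"). How those dicts end up in a Heat environment file is not shown here; the following is a minimal sketch of one plausible serialization step, assuming a parameter_defaults layout (the writer itself is an assumption, not JetPack code):

import yaml

def write_parameter_defaults(params, out_path):
    # Serialize one sub-dict from fetch_nfv_parameters(), e.g.
    # params["dell_env"], as the parameter_defaults of a Heat env file.
    with open(out_path, "w") as f:
        yaml.safe_dump({"parameter_defaults": params}, f,
                       default_flow_style=False)

# write_parameter_defaults({"KernelArgs": "iommu=pt intel_iommu=on"},
#                          "dell-environment-overrides.yaml")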
Code example #10
File: validate_networks.py  Project: alp317/JetPack-1
    def build_node_list(self):
        self.nodes = []

        # Pull in the nodes that nova doesn't know about in our json file
        for server_name in self.network_config["nodes"].keys():
            server = self.network_config["nodes"][server_name]
            node = self.Node(server_name, server["ip"], server["user"],
                             server["networks"])

            self.nodes.append(node)

        # Sort just these by name so the SAH/Director/Dashboard nodes come
        # first
        self.nodes.sort(key=lambda n: n.name)

        os_auth_url, os_tenant_name, os_username, os_password = \
            CredentialHelper.get_undercloud_creds()

        kwargs = {
            'os_username': os_username,
            'os_password': os_password,
            'os_auth_url': os_auth_url,
            'os_tenant_name': os_tenant_name
        }

        nova = nova_client.Client(
            '2',  # API version
            os_username,
            os_password,
            os_tenant_name,
            os_auth_url)

        ironic = ironicclient.client.get_client(1, **kwargs)

        # Build up a map that maps flavor ids to flavor names
        flavor_map = {}
        flavors = nova.flavors.list()
        for flavor in flavors:
            flavor_map[flavor.id] = flavor.name

        logger.debug("flavor_map is:")
        for flavor in flavor_map.keys():
            logger.debug("    " + flavor + " => " + flavor_map[flavor])

        # Get the nodes from nova
        tmp_nodes = []
        nova_servers = nova.servers.list()
        for nova_server in nova_servers:
            flavor_name = None
            if nova_server.flavor["id"]:
                flavor_name = flavor_map[nova_server.flavor["id"]]
                if flavor_name == "baremetal":
                    flavor_name = None

            if not flavor_name:
                ironic_server = ironic.node.get_by_instance_uuid(
                    nova_server.id)
                capabilities = ironic_server.properties["capabilities"]

                match = re.search(r"node:([a-zA-Z-]+)-\d+", capabilities)
                if match:
                    flavor_name = match.group(1)
                else:
                    logger.error("Unable to find flavor name for "
                                 "node {}".format(nova_server.name))
                    sys.exit(1)

            # From the flavor, get the networks
            networks = self.network_config["flavors_to_networks"][flavor_name]

            node = self.Node(nova_server.name,
                             nova_server.networks["ctlplane"][0], "heat-admin",
                             networks)
            tmp_nodes.append(node)

        # Sort the overcloud nodes by name to group the role types together
        tmp_nodes.sort(key=lambda n: n.name)
        self.nodes.extend(tmp_nodes)
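When nova reports the baremetal flavor, the code above falls back to parsing the node's Ironic capabilities string. That parsing step in isolation (the sample capabilities value is illustrative):

import re

def flavor_from_capabilities(capabilities):
    # Capabilities are stored as "key1:value1,key2:value2,..."; the node
    # entry looks like "node:dell-compute-3", and the flavor name is the
    # part before the trailing index.
    match = re.search(r"node:([a-zA-Z-]+)-\d+", capabilities)
    return match.group(1) if match else None

# flavor_from_capabilities("boot_mode:uefi,node:dell-compute-3")
# -> "dell-compute"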
Code example #11
class ConfigOvercloud(object):
    """
    Description: Class responsible for overcloud configurations.
    """
    ironic = IronicHelper()
    ironic_client = ironic.get_ironic_client()
    nodes = ironic_client.node.list()
    get_drac_credential = CredentialHelper()

    def __init__(self, overcloud_name):
        self.overcloud_name = overcloud_name
        self.overcloudrc = "source " + home_dir + "/"\
            + self.overcloud_name + "rc;"
        self.nfv_params = NfvParameters()

    def find_ifaces_by_keyword(self, yaml_file, keyword):
        nics = []
        with open(yaml_file, 'r') as f:
            content = f.readlines()
            for line in content:
                if keyword in line:
                    nics.append(line.split(':')[1].strip())
        return nics

    def edit_environment_files(self,
                               mtu,
                               enable_hugepage,
                               enable_numa,
                               hugepage_size,
                               hostos_cpu_count,
                               ovs_dpdk,
                               sriov,
                               nic_env_file,
                               mariadb_max_connections,
                               innodb_buffer_pool_size,
                               innodb_buffer_pool_instances,
                               controller_count,
                               ceph_storage_count,
                               controller_flavor,
                               ceph_storage_flavor,
                               swift_storage_flavor,
                               block_storage_flavor,
                               vlan_range,
                               dell_compute_count=0):
        try:
            logger.info("Editing dell environment file")
            file_path = home_dir + '/pilot/templates/dell-environment.yaml'
            dpdk_file = home_dir + '/pilot/templates/neutron-ovs-dpdk.yaml'
            cmds = []
            if not os.path.isfile(file_path):
                raise Exception(
                    "The dell-environment.yaml file does not exist")
            if not os.path.isfile(dpdk_file):
                raise Exception(
                    "The neutron-ovs-dpdk.yaml file does not exist")
            if not ovs_dpdk:
                cmds.append('sed -i "s|  # NovaSchedulerDefaultFilters|  ' +
                            'NovaSchedulerDefaultFilters|" ' + file_path)
            cmds.extend(
                ('sed -i "s|DellComputeCount:.*|DellComputeCount: ' +
                 str(dell_compute_count) + '|" ' + file_path,
                 'sed -i "s|ControllerCount:.*|ControllerCount: ' +
                 str(controller_count) + '|" ' + file_path,
                 'sed -i "s|CephStorageCount:.*|CephStorageCount: ' +
                 str(ceph_storage_count) + '|" ' + file_path,
                 'sed -i "s|OvercloudControllerFlavor:.*' +
                 '|OvercloudControllerFlavor: ' + str(controller_flavor) +
                 '|" ' + file_path, 'sed -i "s|OvercloudCephStorageFlavor:.*' +
                 '|OvercloudCephStorageFlavor: ' + str(ceph_storage_flavor) +
                 '|" ' + file_path,
                 'sed -i "s|OvercloudSwiftStorageFlavor:.*' +
                 '|OvercloudSwiftStorageFlavor: ' + str(swift_storage_flavor) +
                 '|" ' + file_path,
                 'sed -i "s|OvercloudBlockStorageFlavor:.*' +
                 '|OvercloudBlockStorageFlavor: ' + str(block_storage_flavor) +
                 '|" ' + file_path, 'sed -i "s|NeutronNetworkVLANRanges:.*' +
                 '|NeutronNetworkVLANRanges: ' + 'physint:' + str(vlan_range) +
                 ',physext'
                 '|" ' + file_path))
            kernel_args = ''
            if sriov or ovs_dpdk:
                kernel_args = "iommu=pt intel_iommu=on"
            if enable_hugepage:
                hpg_num = self.nfv_params.calculate_hugepage_count(
                    hugepage_size)
                kernel_args += " default_hugepagesz=%s hugepagesz=%s" \
                    " hugepages=%s" \
                    % (hugepage_size, hugepage_size[0:-1], str(hpg_num))

            if enable_numa:
                node_uuid, node_data = self.nfv_params.select_compute_node()
                self.nfv_params.parse_data(node_data)
                self.nfv_params.get_all_cpus()
                self.nfv_params.get_host_cpus(hostos_cpu_count)
                if ovs_dpdk:
                    dpdk_nics = self.find_ifaces_by_keyword(
                        nic_env_file, 'Dpdk')
                    self.nfv_params.get_pmd_cpus(mtu, dpdk_nics)
                    self.nfv_params.get_socket_memory(mtu, dpdk_nics)
                self.nfv_params.get_nova_cpus()
                self.nfv_params.get_isol_cpus()
                kernel_args += " isolcpus=%s" % self.nfv_params.nova_cpus
                cmds.append('sed -i "s|# NovaVcpuPinSet:.*|NovaVcpuPinSet: ' +
                            self.nfv_params.nova_cpus + '|" ' + file_path)
            cmds.append('sed -i "s|# DellComputeParameters:' +
                        '|DellComputeParameters:|" ' + file_path)
            if kernel_args:
                cmds.append('sed -i "s|# KernelArgs:.*|KernelArgs: \\"' +
                            kernel_args + '\\" |" ' + file_path)
            if ovs_dpdk:
                cmds.append(
                    'sed -i "s|OvsDpdkCoreList:.*|OvsDpdkCoreList: \\"' +
                    self.nfv_params.host_cpus + '\\" |" ' + dpdk_file)
                cmds.append('sed -i "s|OvsPmdCoreList:.*|OvsPmdCoreList: \\"' +
                            self.nfv_params.pmd_cpus + '\\" |" ' + dpdk_file)
                cmds.append('sed -i "s|OvsDpdkSocketMemory:' +
                            '.*|OvsDpdkSocketMemory: \\"' +
                            self.nfv_params.socket_mem + '\\" |" ' + dpdk_file)
                cmds.append('sed -i "s|# IsolCpusList:.*|IsolCpusList: ' +
                            self.nfv_params.isol_cpus + '|" ' + dpdk_file)

            # Performance and Optimization
            if innodb_buffer_pool_size != "dynamic":
                BufferPoolSize = int(innodb_buffer_pool_size.replace(
                    "G", "")) * 1024
                memory_mb = self.nfv_params.get_minimum_memory_size("control")
                if memory_mb < BufferPoolSize:
                    raise Exception("innodb_buffer_pool_size is greater than"
                                    " available memory size")
            cmds.append(
                'sed -i "s|MysqlMaxConnections.*|MysqlMaxConnections: ' +
                mariadb_max_connections + '|" ' + file_path)
            if ovs_dpdk:
                f_path = dpdk_file
            else:
                f_path = file_path
            cmds.append('sed -i "s|BufferPoolSize.*|BufferPoolSize: ' +
                        innodb_buffer_pool_size + '|" ' + f_path)
            cmds.append(
                'sed -i "s|BufferPoolInstances.*|BufferPoolInstances: ' +
                innodb_buffer_pool_instances + '|" ' + f_path)

            for cmd in cmds:
                status = os.system(cmd)
                if status != 0:
                    raise Exception("Failed to execute the command {}"
                                    " with error code {}".format(cmd, status))
                logger.debug("cmd: {}".format(cmd))

        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            logger.error(message)
            raise Exception("Failed to modify the dell_environment.yaml"
                            " at location {}".format(file_path))

    def get_dell_compute_nodes_hostnames(self, nova):
        try:
            logger.info("Getting dellnfv compute node hostnames")

            # Get list of dell nfv nodes
            dell_hosts = []

            for host in nova.servers.list():
                if "dell-compute" in host.name:
                    hostname = str(host.name)
                    dell_hosts.append(hostname)

            return dell_hosts
        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            logger.error(message)
            raise Exception("Failed to get the Dell Compute nodes.")
Code example #12
File: introspect_nodes.py  Project: uday344/JetPack
def introspect_nodes(in_band, ironic_client, nodes, transition_nodes=True):
    # Check to see if provisioning_mac has been set on all the nodes
    bad_nodes = []
    for node in nodes:
        if "provisioning_mac" not in node.properties:
            bad_nodes.append(node)

    if bad_nodes:
        ips = [CredentialHelper.get_drac_ip(node) for node in bad_nodes]
        fail_msg = "\n".join(ips)

        logger.error("Run config_idrac.py on {} before running "
                     "introspection".format(fail_msg))

    if transition_nodes:
        nodes = transition_to_state(ironic_client, nodes, 'manage',
                                    'manageable')

    threads = []
    bad_nodes = []
    inspecting = []
    for node in nodes:
        use_oob_introspection = is_introspection_oob(in_band, node, logger)

        introspection_type = "out-of-band"
        if not use_oob_introspection:
            introspection_type = "in-band"

        logger.info("Starting {} introspection on node "
                    "{} ({})".format(introspection_type,
                                     CredentialHelper.get_drac_ip(node),
                                     node.uuid))

        if not use_oob_introspection:
            thread = ThreadWithExHandling(logger,
                                          object_identity=node,
                                          target=ib_introspect,
                                          args=(node, ))
            threads.append(thread)
            thread.start()
        else:
            # Note that this CLI command is asynchronous, so it will return
            # immediately and Ironic conductor will begin OOB introspection in
            # the background
            if os.system("openstack baremetal node inspect " + node.uuid) != 0:
                bad_nodes.append(node)
            else:
                # Wait until the node goes into the inspecting state before
                # continuing.  This is necessary because OOB introspection
                # completes very quickly on some nodes.  The busy wait is
                # intentional.
                node = ironic_client.node.get(node.uuid)
                logger.debug(
                    CredentialHelper.get_drac_ip(node) + " provision_state=" +
                    node.provision_state)
                while node.provision_state != INSPECTING:
                    node = ironic_client.node.get(node.uuid)
                    logger.debug(
                        CredentialHelper.get_drac_ip(node) +
                        " provision_state=" + node.provision_state)
                logger.debug(
                    CredentialHelper.get_drac_ip(node) +
                    " adding to inspecting")
                inspecting.append(node)

    # Note that the in-band introspection CLI command is synchronous.  By the
    # time all of the threads have completed, the nodes are all out of the
    # "inspecting" state.
    for thread in threads:
        thread.join()

    for thread in threads:
        if thread.ex is not None:
            bad_nodes.append(thread.object_identity)

    if bad_nodes:
        ips = [
            "{} ({})".format(CredentialHelper.get_drac_ip(node), node.uuid)
            for node in bad_nodes
        ]
        raise RuntimeError("Failed to introspect {}".format(", ".join(ips)))

    if use_oob_introspection:
        # Wait for the nodes to transition out of "inspecting"
        # Allow 10 minutes to complete OOB introspection
        logger.info("Waiting for introspection to complete...")
        introspection_timeout = 600
        while introspection_timeout > 0:
            inspecting = refresh_nodes(ironic_client, inspecting)
            # Filter instead of removing from the list while iterating over
            # it, which would skip entries as the list shrinks.
            inspecting = [node for node in inspecting
                          if node.provision_state == INSPECTING]
            if len(inspecting) == 0:
                logger.info("Introspection finished")
                break
            else:
                logger.debug("Still inspecting=" + ", ".join([
                    "{} ({})".format(CredentialHelper.get_drac_ip(node),
                                     node.uuid) for node in inspecting
                ]))

                introspection_timeout -= 1
                if introspection_timeout > 0:
                    sleep(1)

        if introspection_timeout == 0:
            error_msg = "Introspection failed."
            if len(inspecting) > 0:
                inspecting_ips = [
                    "{} ({})".format(CredentialHelper.get_drac_ip(node),
                                     node.uuid) for node in inspecting
                ]
                error_msg += "  The following nodes never exited the " \
                    "{} state: {}.".format(INSPECTING,
                                           ", ".join(inspecting_ips))

            raise RuntimeError(error_msg)

    if not use_oob_introspection:
        # The PERC H740P RAID controller only makes virtual disks visible to
        # the host OS.  Physical disks are not visible with this controller
        # because it does not support pass-through mode.  This results in
        # local_gb not being set during IB introspection, which causes
        # problems further along in the flow.

        # Check to see if all nodes have local_gb defined, and if not run OOB
        # introspection to discover local_gb.
        nodes = refresh_nodes(ironic_client, nodes)
        bad_nodes = []
        for node in nodes:
            if 'local_gb' not in node.properties:
                bad_nodes.append(node)

        if bad_nodes:
            ips = [CredentialHelper.get_drac_ip(node) for node in bad_nodes]
            fail_msg = "\n".join(ips)

            logger.info("local_gb was not discovered on:  {}".format(fail_msg))

            logger.info("Running OOB introspection to populate it.")
            introspect_nodes(False,
                             ironic_client,
                             bad_nodes,
                             transition_nodes=False)

    if transition_nodes:
        nodes = transition_to_state(ironic_client, nodes, 'provide',
                                    'available')

    if use_oob_introspection:
        # FIXME: Remove this hack when OOB introspection is fixed
        for node in nodes:
            delete_non_pxe_ports(ironic_client, node)
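The OOB wait above is a poll-filter-timeout loop. The same pattern as a reusable helper (names are illustrative; it ticks once per second like the original):

from time import sleep

def wait_until_all_leave_state(ironic_client, nodes, state, timeout=600):
    # Poll Ironic until every node has left `state` or the timeout expires;
    # returns the nodes still stuck (an empty list means success).
    remaining = list(nodes)
    while remaining and timeout > 0:
        remaining = [ironic_client.node.get(node.uuid) for node in remaining]
        remaining = [node for node in remaining
                     if node.provision_state == state]
        if remaining:
            sleep(1)
            timeout -= 1
    return remaining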
Code example #13
File: introspect_nodes.py  Project: kholohan/JetPack
def introspect_nodes(in_band, ironic_client, nodes,
                     transition_nodes=True):
    # Check to see if provisioning_mac has been set on all the nodes
    bad_nodes = []
    for node in nodes:
        if "provisioning_mac" not in node.properties:
            bad_nodes.append(node)

    if bad_nodes:
        ips = [CredentialHelper.get_drac_ip(node) for node in bad_nodes]
        fail_msg = "\n".join(ips)

        logger.error("Run config_idrac.py on {} before running "
                     "introspection".format(fail_msg))

    if transition_nodes:
        nodes = transition_to_state(ironic_client, nodes,
                                    'manage', 'manageable')

    threads = []
    bad_nodes = []
    for node in nodes:
        use_oob_introspection = is_introspection_oob(in_band, node, logger)

        introspection_type = "out-of-band"
        if not use_oob_introspection:
            introspection_type = "in-band"

        logger.info("Starting {} introspection on node "
                    "{} ({})".format(introspection_type,
                                     CredentialHelper.get_drac_ip(node),
                                     node.uuid))

        if not use_oob_introspection:
            thread = ThreadWithExHandling(logger,
                                          object_identity=node,
                                          target=ib_introspect,
                                          args=(node,))
            threads.append(thread)
            thread.start()
        else:
            if os.system("openstack baremetal node inspect " + node.uuid) != 0:
                bad_nodes.append(node)

    for thread in threads:
        thread.join()

    for thread in threads:
        if thread.ex is not None:
            bad_nodes.append(thread.object_identity)

    if bad_nodes:
        ips = ["{} ({})".format(CredentialHelper.get_drac_ip(node),
                                node.uuid) for node in bad_nodes]
        raise RuntimeError("Failed to introspect {}".format(", ".join(ips)))

    if not use_oob_introspection:
        # The PERC H740P RAID controller only makes virtual disks visible to
        # the host OS.  Physical disks are not visible with this controller
        # because it does not support pass-through mode.  This results in
        # local_gb not being set during IB introspection, which causes
        # problems further along in the flow.

        # Check to see if all nodes have local_gb defined, and if not run OOB
        # introspection to discover local_gb.
        nodes = refresh_nodes(ironic_client, nodes)
        bad_nodes = []
        for node in nodes:
            if 'local_gb' not in node.properties:
                bad_nodes.append(node)

        if bad_nodes:
            ips = [CredentialHelper.get_drac_ip(node) for node in bad_nodes]
            fail_msg = "\n".join(ips)

            logger.info("local_gb was not discovered on:  {}".format(fail_msg))

            logger.info("Running OOB introspection to populate it.")
            introspect_nodes(False, ironic_client, bad_nodes,
                             transition_nodes=False)

    if transition_nodes:
        nodes = transition_to_state(ironic_client, nodes,
                                    'provide', 'available')

    if use_oob_introspection:
        # FIXME: Remove this hack when OOB introspection is fixed
        for node in nodes:
            delete_non_pxe_ports(ironic_client, node)
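Both introspect_nodes variants depend on ThreadWithExHandling, which is defined elsewhere in JetPack. A minimal sketch matching the constructor arguments used at the call sites (logger, object_identity, target, args); this is an inferred interface, not the actual implementation:

import threading

class ThreadWithExHandling(threading.Thread):
    # Runs target(*args) and stores any exception in self.ex so the caller
    # can check it after join() instead of losing it in the worker thread.
    def __init__(self, logger, object_identity=None, target=None, args=()):
        super(ThreadWithExHandling, self).__init__()
        self.logger = logger
        self.object_identity = object_identity
        self.ex = None
        self._run_target = target
        self._run_args = args

    def run(self):
        try:
            self._run_target(*self._run_args)
        except Exception as ex:
            self.ex = ex
            self.logger.exception("worker thread failed")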
Code example #14
def main():

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-compute",
                       "--compute",
                       dest="compute_node_ip",
                       action="store",
                       default='')
    group.add_argument("-controller",
                       "--controller",
                       dest="controller_node_ip",
                       action="store",
                       default='')
    parser.add_argument('-f',
                        '--file',
                        help='name of json file containing the node being set',
                        default=Constants.INSTACKENV_FILENAME)
    parser.add_argument("-l",
                        "--logging-level",
                        default="INFO",
                        type=logging_level,
                        help="""logging level defined by the logging module;
                                choices include CRITICAL, ERROR, WARNING,
                                INFO, and DEBUG""",
                        metavar="LEVEL")
    args = parser.parse_args()

    home_dir = os.path.expanduser('~')
    undercloudrc_name = os.path.join(home_dir, 'stackrc')
    oc_stack_name = CredentialHelper.get_overcloud_name()
    ssh_config = os.path.join(home_dir, '.ssh/config')
    undercloud_config = os.path.join(home_dir, 'undercloud_nodes.txt')
    instack_file = os.path.expanduser(args.file)

    # Run ~/pilot/identify_nodes.py > ~/undercloud_nodes.txt
    cmd = os.path.join(home_dir,
                       'pilot/identify_nodes.py > ~/undercloud_nodes.txt')
    os.system(cmd)

    # Get CONTROLLER_NODES_IP
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('grep -A1 "cntl"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    controller_nodes_ip = p3.communicate()[0].split()

    # Get CONTROLLER_NODE_NAMES
    p1 = subprocess.Popen(['nova', 'list'], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/controller/ {print $4}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    controller_node_names = p2.communicate()[0].split()

    # Get COMPUTE_NODES_IP
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('egrep -A1 -h "nova|compute"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    compute_nodes_ip = p3.communicate()[0].split()

    # Get COMPUTE_NOVA_NAMES
    p1 = subprocess.Popen(['nova', 'list'], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/compute/ {print $4}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    compute_nova_names = p2.communicate()[0].split()

    # Get first_controller_node_ip
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('grep -A1 "cntl0"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    first_controller_node_ip = p3.communicate()[0].rstrip()

    oc_auth_url, oc_tenant_name, oc_username, oc_password = \
        CredentialHelper.get_overcloud_creds()

    LOG.setLevel(args.logging_level)

    LOG.debug("home_dir: {}".format(home_dir))
    LOG.debug("oc_stack_name: {}".format(oc_stack_name))
    LOG.debug("oc_auth_url: {}".format(oc_auth_url))
    LOG.debug("oc_username: {}".format(oc_username))
    LOG.debug("oc_password: {}".format(oc_password))
    LOG.debug("oc_tenant_name: {}".format(oc_tenant_name))
    LOG.debug("controller_nodes_ip: {}".format(controller_nodes_ip))
    LOG.debug("controller_nodes_names: {}".format(controller_nodes_ip))
    LOG.debug("compute_nodes_ip: {}".format(compute_nodes_ip))
    LOG.debug("compute_nova_names: {}".format(compute_nodes_ip))

    # Execute Compute node deletion
    if args.compute_node_ip != '':
        compute_node_ip = args.compute_node_ip.rstrip()
        if check_ip_validity(compute_node_ip):
            LOG.info("***  Removing a compute node {} to InstanceHA"
                     " configuration.".format(compute_node_ip))
            delete_compute_node_resources(compute_node_ip,
                                          first_controller_node_ip)
        else:
            LOG.critical("!!! - Fatal Error: Invalid IP address: {}".format(
                compute_node_ip))
            exit(-1)

    # Execute Controller node deletion
    if args.controller_node_ip != '':
        controller_node_ip = args.controller_node_ip.rstrip()
        if check_ip_validity(controller_node_ip):
            LOG.info("***  Removing a controller node {} to InstanceHA"
                     " configuration.".format(controller_node_ip))
            LOG.debug("controller_node_ip: {}".format(controller_node_ip))
            delete_controller_node_resources(controller_node_ip,
                                             first_controller_node_ip)
        else:
            LOG.critical("!!! - Fatal Error: Invalid IP address: {}".format(
                controller_node_ip))
            exit(-1)
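The cat | grep | awk pipelines above all answer one question: which Hostname values belong to Host aliases matching a pattern in ~/.ssh/config. A pure-Python sketch of that lookup (an illustrative helper, not JetPack code):

def hostnames_for(ssh_config_path, host_pattern):
    # Map each "Host <alias>" stanza to its "Hostname <ip>" line and
    # return the IPs whose alias contains host_pattern (e.g. "cntl").
    matches = []
    current_host = None
    with open(ssh_config_path) as f:
        for line in f:
            parts = line.split()
            if len(parts) >= 2 and parts[0] == "Host":
                current_host = parts[1]
            elif (len(parts) >= 2 and parts[0] == "Hostname"
                    and current_host and host_pattern in current_host):
                matches.append(parts[1])
    return matches

# hostnames_for(ssh_config, "cntl")    -> controller ctlplane IPs
# hostnames_for(ssh_config, "compute") -> compute ctlplane IPs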
Code example #15
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument("-l",
                        "--logging-level",
                        default="INFO",
                        type=logging_level,
                        help="""logging level defined by the logging module;
                                choices include CRITICAL, ERROR, WARNING,
                                INFO, and DEBUG""",
                        metavar="LEVEL")
    args = parser.parse_args()

    home_dir = os.path.expanduser('~')
    undercloudrc_name = os.path.join(home_dir, 'stackrc')
    oc_stack_name = CredentialHelper.get_overcloud_name()
    ssh_config = os.path.join(home_dir, '.ssh/config')
    undercloud_config = os.path.join(home_dir, 'undercloud_nodes.txt')

    # Run update_ssh_config.py
    cmd = os.path.join(os.getcwd(), 'update_ssh_config.py')
    os.system(cmd)

    # Run identify_nodes.py > ~/undercloud_nodes.txt
    cmd = os.path.join(os.getcwd(),
                       'identify_nodes.py > ~/undercloud_nodes.txt')
    os.system(cmd)

    # Get first_controller_node
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/cntl0/ {print $2}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    first_controller_node = p2.communicate()[0].rstrip()

    # Get first_controller_node_ip
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('grep -A1 "cntl0"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    first_controller_node_ip = p3.communicate()[0].rstrip()

    # Get CONTROLLER_NODES_IP
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('grep -A1 "cntl"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    controller_nodes_ip = p3.communicate()[0].split()

    # Get first_compute_node
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(
        shlex.split('awk \'/nova0/ || /compute0/ {print $2}\''),
        stdin=p1.stdout,
        stdout=subprocess.PIPE)
    first_compute_node = p2.communicate()[0].rstrip()

    # Get first_compute_node_ip
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('egrep -A1 -h "nova0|compute0"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    first_compute_node_ip = p3.communicate()[0].rstrip()

    # Get COMPUTE_NODES_IP
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('egrep -A1 -h "nova|compute"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    compute_nodes_ip = p3.communicate()[0].split()

    # Get COMPUTE_NOVA_NAMES
    p1 = subprocess.Popen(['nova', 'list'], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/compute/ {print $4}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    compute_nova_names = p2.communicate()[0].split()
    first_compute_nova_name = compute_nova_names[0]

    # Get CONTROLLER_NOVA_NAMES
    p1 = subprocess.Popen(['nova', 'list'], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/controller/ {print $4}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    controller_nova_names = p2.communicate()[0].split()
    first_controller_nova_name = controller_nova_names[0]

    oc_auth_url, oc_tenant_name, oc_username, oc_password = \
        CredentialHelper.get_overcloud_creds()

    LOG.setLevel(args.logging_level)

    # Remove RA InstanceHA configuration
    LOG.info(
        "***  Removing Instance HA for stack {}  ***".format(oc_stack_name))

    LOG.debug("home_dir: {}".format(home_dir))
    LOG.debug("oc_stack_name: {}".format(oc_stack_name))
    LOG.debug("oc_auth_url: {}".format(oc_auth_url))
    LOG.debug("oc_username: {}".format(oc_username))
    LOG.debug("oc_password: {}".format(oc_password))
    LOG.debug("oc_tenant_name: {}".format(oc_tenant_name))
    LOG.debug("first_controller_node: {}".format(first_controller_node))
    LOG.debug("first_controller_node_ip: {}".format(first_controller_node_ip))
    LOG.debug("controller_nodes_ip: {}".format(controller_nodes_ip))
    LOG.debug("first_compute_node: {}".format(first_compute_node))
    LOG.debug("first_compute_node_ip: {}".format(first_compute_node_ip))
    LOG.debug("compute_nodes_ip: {}".format(compute_nodes_ip))
    LOG.debug("compute_nova_names: {}".format(compute_nova_names))
    LOG.debug("first_compute_nova_name: {}".format(first_compute_nova_name))
    LOG.debug("controller_nova_names: {}".format(controller_nova_names))
    LOG.debug(
        "first_controller_nova_name: {}".format(first_controller_nova_name))

    cmd = "source {} ".format(undercloudrc_name)
    os.system(cmd)

    out = ssh_cmd(
        first_controller_node_ip, "heat-admin",
        "sudo pcs property show stonith-enabled \
                  | awk '/stonith/ {print $2}'")
    result = out[0].rstrip()
    LOG.debug("result: {}".format(result))

    if result == 'true':
        ssh_cmd(first_controller_node_ip, "heat-admin",
                "sudo pcs property set stonith-enabled=false")
        ssh_cmd(first_controller_node_ip, "heat-admin",
                "sudo pcs property set maintenance-mode=true")

    disable_control_plane_services(first_controller_node_ip)
    delete_compute_nodeName_resource(compute_nodes_ip,
                                     first_controller_node_ip)
    delete_compute_nodes_resources(first_controller_node_ip)
    delete_compute_nodes_stonith_devices(compute_nodes_ip,
                                         first_controller_node_ip)
    delete_nova_evacuate_resource(first_controller_node_ip)
    disable_remote_pacemaker(compute_nodes_ip)
    enable_openstack_services(compute_nodes_ip)

    ssh_cmd(first_controller_node_ip, "heat-admin",
            "sudo pcs property set maintenance-mode=false")
    ssh_cmd(first_controller_node_ip, "heat-admin",
            "sudo pcs property set stonith-enabled=true")
Code example #16
File: dell_nfv.py  Project: ahmadfsbd/JetPack
class ConfigOvercloud(object):
    """
    Description: Class responsible for overcloud configurations.
    """
    ironic = IronicHelper()
    ironic_client = ironic.get_ironic_client()
    nodes = ironic_client.node.list()
    get_drac_credential = CredentialHelper()

    def __init__(self, overcloud_name):
        self.overcloud_name = overcloud_name
        self.overcloudrc = "source " + home_dir + "/"\
            + self.overcloud_name + "rc;"
        self.nfv_params = NfvParameters()

    def find_ifaces_by_keyword(self, yaml_file, keyword):
        nics = []
        with open(yaml_file, 'r') as f:
            content = f.readlines()
            for line in content:
                if keyword in line:
                    nics.append(line.split(':')[1].strip())
        return nics

    def edit_environment_files(self,
                               mtu,
                               enable_hugepage,
                               enable_numa,
                               hugepage_size,
                               hostos_cpu_count,
                               ovs_dpdk,
                               sriov,
                               hw_offload,
                               sriov_interfaces,
                               nic_env_file,
                               controller_count,
                               ceph_storage_count,
                               controller_flavor,
                               ceph_storage_flavor,
                               swift_storage_flavor,
                               block_storage_flavor,
                               vlan_range,
                               time_zone,
                               dell_compute_count=0,
                               dell_computehci_count=0,
                               dell_powerflex_count=0):

        try:
            logger.info("Editing dell environment file")
            file_path = home_dir + '/pilot/templates/dell-environment.yaml'
            dpdk_file = home_dir + '/pilot/templates/neutron-ovs-dpdk.yaml'
            hw_off_file = home_dir + '/pilot/templates/ovs-hw-offload.yaml'
            cmds = []
            if not os.path.isfile(file_path):
                raise Exception(
                    "The dell-environment.yaml file does not exist")
            if not os.path.isfile(dpdk_file):
                raise Exception(
                    "The neutron-ovs-dpdk.yaml file does not exist")
            if not ovs_dpdk:
                cmds.append('sed -i "s|  # NovaSchedulerDefaultFilters|  ' +
                            'NovaSchedulerDefaultFilters|" ' + file_path)
            cmds.extend(
                ('sed -i "s|DellComputeCount:.*|DellComputeCount: ' +
                 str(dell_compute_count) + '|" ' + file_path,
                 'sed -i "s|DellComputeHCICount:.*|DellComputeHCICount: ' +
                 str(dell_computehci_count) + '|" ' + file_path,
                 'sed -i "s|ControllerCount:.*|ControllerCount: ' +
                 str(controller_count) + '|" ' + file_path,
                 'sed -i "s|CephStorageCount:.*|CephStorageCount: ' +
                 str(ceph_storage_count) + '|" ' + file_path,
                 'sed -i "s|PowerflexStorageCount:.*|PowerflexStorageCount: ' +
                 str(dell_powerflex_count) + '|" ' + file_path,
                 'sed -i "s|OvercloudControllerFlavor:.*' +
                 '|OvercloudControllerFlavor: ' + str(controller_flavor) +
                 '|" ' + file_path, 'sed -i "s|OvercloudCephStorageFlavor:.*' +
                 '|OvercloudCephStorageFlavor: ' + str(ceph_storage_flavor) +
                 '|" ' + file_path,
                 'sed -i "s|OvercloudSwiftStorageFlavor:.*' +
                 '|OvercloudSwiftStorageFlavor: ' + str(swift_storage_flavor) +
                 '|" ' + file_path,
                 'sed -i "s|OvercloudBlockStorageFlavor:.*' +
                 '|OvercloudBlockStorageFlavor: ' + str(block_storage_flavor) +
                 '|" ' + file_path, 'sed -i "s|NeutronNetworkVLANRanges:.*' +
                 '|NeutronNetworkVLANRanges: ' + 'physint:' + str(vlan_range) +
                 ',physext'
                 '|" ' + file_path))
            kernel_args = ''
            if sriov or ovs_dpdk:
                kernel_args = "iommu=pt intel_iommu=on"

            if enable_hugepage:
                hpg_num = self.nfv_params.calculate_hugepage_count(
                    hugepage_size)
                kernel_args += " default_hugepagesz=%s hugepagesz=%s" \
                    " hugepages=%s" \
                    % (hugepage_size, hugepage_size[0:-1], str(hpg_num))

            if enable_numa:
                node_uuid, node_data = self.nfv_params.select_compute_node()
                self.nfv_params.parse_data(node_data)
                self.nfv_params.get_all_cpus()
                self.nfv_params.get_host_cpus(hostos_cpu_count)
                if ovs_dpdk:
                    dpdk_nics = self.find_ifaces_by_keyword(
                        nic_env_file, 'Dpdk')
                    logger.debug("DPDK-NICs >>" + str(dpdk_nics))
                    self.nfv_params.get_pmd_cpus(mtu, dpdk_nics)
                    self.nfv_params.get_socket_memory(mtu, dpdk_nics)
                self.nfv_params.get_nova_cpus()
                self.nfv_params.get_isol_cpus()
                kernel_args += " isolcpus=%s" % self.nfv_params.isol_cpus
                cmds.append(
                    'sed -i "s|# NovaComputeCpuDedicatedSet:.*|NovaComputeCpuDedicatedSet: '
                    + self.nfv_params.nova_cpus + '|" ' + file_path)
            if kernel_args:
                cmds.append('sed -i "s|# DellComputeParameters:' +
                            '|DellComputeParameters:|" ' + file_path)
                cmds.append('sed -i "s|# KernelArgs:.*|KernelArgs: \\"' +
                            kernel_args + '\\" |" ' + file_path)
            if ovs_dpdk:
                cmds.append(
                    'sed -i "s|OvsDpdkCoreList:.*|OvsDpdkCoreList: \\"' +
                    self.nfv_params.host_cpus + '\\" |" ' + dpdk_file)
                cmds.append(
                    'sed -i "s|NovaComputeCpuSharedSet:.*|NovaComputeCpuSharedSet: \\"'
                    + self.nfv_params.host_cpus + '\\" |" ' + dpdk_file)
                cmds.append('sed -i "s|OvsPmdCoreList:.*|OvsPmdCoreList: \\"' +
                            self.nfv_params.pmd_cpus + '\\" |" ' + dpdk_file)
                cmds.append('sed -i "s|OvsDpdkSocketMemory:' +
                            '.*|OvsDpdkSocketMemory: \\"' +
                            self.nfv_params.socket_mem + '\\" |" ' + dpdk_file)
                cmds.append('sed -i "s|IsolCpusList:.*|IsolCpusList: \\"' +
                            self.nfv_params.isol_cpus + '\\" |" ' + dpdk_file)

            if dell_powerflex_count > 0:
                cmds.append('sed -i "s|NovaEnableRbdBackend:.*' +
                            '|NovaEnableRbdBackend: false |" ' + file_path)
                cmds.append('sed -i "s|CinderEnableRbdBackend:.*' +
                            '|CinderEnableRbdBackend: false |" ' + file_path)
                cmds.append('sed -i "s|GlanceBackend:.*' +
                            '|GlanceBackend: cinder|" ' + file_path)

            cmds.append('sed -i "s|TimeZone:.*' + '|TimeZone: \\"' +
                        time_zone + '\\" |" ' + file_path)

            for cmd in cmds:
                status = os.system(cmd)
                if status != 0:
                    raise Exception("Failed to execute the command {}"
                                    " with error code {}".format(cmd, status))
                logger.debug("cmd: {}".format(cmd))

        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            logger.error(message)
            raise Exception("Failed to modify the dell_environment.yaml"
                            " at location {}".format(file_path))

    def get_dell_compute_nodes_hostnames(self, nova):
        try:
            logger.info("Getting dellnfv compute node hostnames")

            # Get list of dell nfv nodes
            dell_hosts = []

            for host in nova.servers.list():
                if "dell-compute" in host.name:
                    hostname = str(host.name)
                    dell_hosts.append(hostname)

            return dell_hosts
        except Exception as error:
            message = "Exception {}: {}".format(
                type(error).__name__, str(error))
            logger.error(message)
            raise Exception("Failed to get the Dell Compute nodes.")
Code example #17
    def _get_nodes(self):
        os_auth_url, os_tenant_name, os_username, os_password, \
            os_user_domain_name, os_project_domain_name = \
            CredentialHelper.get_undercloud_creds()
        auth_url = os_auth_url + "v3"

        provisioning_network = NetworkHelper.get_provisioning_network()

        kwargs = {
            'os_username': os_username,
            'os_password': os_password,
            'os_auth_url': os_auth_url,
            'os_tenant_name': os_tenant_name,
            'os_user_domain_name': os_user_domain_name,
            'os_project_domain_name': os_project_domain_name
        }
        i_client = ironic_client.get_client(1, **kwargs)

        auth = v3.Password(auth_url=auth_url,
                           username=os_username,
                           password=os_password,
                           project_name=os_tenant_name,
                           user_domain_name=os_user_domain_name,
                           project_domain_name=os_project_domain_name)

        sess = session.Session(auth=auth)
        n_client = nova_client.Client(2, session=sess)

        # Build up a dictionary that maps roles to a list of IPs for that role
        self.node_roles_to_nodes = {}

        self.logger.debug("Querying ironic and nova for nodes")
        nodes = i_client.node.list(
            fields=["uuid", "instance_uuid", "properties"])
        for node in nodes:
            uuid = node.uuid
            instance_uuid = node.instance_uuid

            # Handle the case where we have a node in ironic that's not in nova
            # (possibly due to the node being in maintenance mode in ironic or
            #  the user not assigning a role to a node, etc)
            if instance_uuid is None:
                self.logger.debug("Ironic node " + uuid + " has no "
                                  "corresponding instance in nova.  Skipping")
                continue

            capabilities = node.properties["capabilities"]
            capabilities = dict(c.split(':') for c in capabilities.split(','))
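            # e.g. "profile:compute,boot_option:local" ->
            #     {'profile': 'compute', 'boot_option': 'local'}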

            # Role is the 'profile' capability when node placement is not
            # in use. Otherwise it's encoded in the 'node' capability.
            if 'profile' in capabilities:
                role = capabilities['profile']
            elif 'node' in capabilities:
                role = capabilities['node']
                # Trim the trailing "-N" where N is the node number
                role = role[:role.rindex('-')]
            else:
                self.logger.error(
                    "Failed to determine role of node {}".format(node))
                sys.exit(1)

            server = n_client.servers.get(instance_uuid)
            for address in server.addresses["ctlplane"]:
                ip = address["addr"]
                if IPAddress(ip) in provisioning_network:
                    break

            self.logger.debug("Got node:\n"
                              "    uuid=" + uuid + "\n"
                              "    ip=" + ip + "\n"
                              "    role=" + role + "\n"
                              "    instance_uuid=" + instance_uuid)

            if role not in self.node_roles_to_nodes:
                self.node_roles_to_nodes[role] = []

            self.node_roles_to_nodes[role].append(ip)

        self.logger.debug("node_roles_to_nodes: " +
                          str(self.node_roles_to_nodes))
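
A minimal sketch of the ctlplane address selection performed above, assuming
NetworkHelper.get_provisioning_network() returns a netaddr IPNetwork and
using hypothetical addresses:

from netaddr import IPAddress, IPNetwork

provisioning_network = IPNetwork("192.168.120.0/24")   # assumed CIDR
addresses = [{"addr": "10.0.0.5"}, {"addr": "192.168.120.21"}]
for address in addresses:
    ip = address["addr"]
    if IPAddress(ip) in provisioning_network:
        break
print(ip)   # -> 192.168.120.21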
Code example #18
def main():

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-compute",
                       "--compute",
                       dest="compute_node_ip",
                       action="store",
                       default='')
    group.add_argument("-controller",
                       "--controller",
                       dest="controller_node_ip",
                       action="store",
                       default='')
    parser.add_argument('-f',
                        '--file',
                        help='name of json file containing the node being set',
                        default=Constants.INSTACKENV_FILENAME)
    parser.add_argument("-l",
                        "--logging-level",
                        default="INFO",
                        type=logging_level,
                        help="""logging level defined by the logging module;
                                choices include CRITICAL, ERROR, WARNING,
                                INFO, and DEBUG""",
                        metavar="LEVEL")
    args = parser.parse_args()

    home_dir = os.path.expanduser('~')
    undercloudrc_name = os.path.join(home_dir, 'stackrc')
    oc_stack_name = CredentialHelper.get_overcloud_name()
    ssh_config = os.path.join(home_dir, '.ssh/config')
    undercloud_config = os.path.join(home_dir, 'undercloud_nodes.txt')
    instack_file = os.path.join(home_dir, args.file)

    # Run update_ssh_config.py
    cmd = os.path.join(os.getcwd(), 'update_ssh_config.py')
    os.system(cmd)

    # Run identify_nodes.py > ~/undercloud_nodes.txt
    cmd = os.path.join(os.getcwd(),
                       'identify_nodes.py > ~/undercloud_nodes.txt')
    os.system(cmd)
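
    # The pipelines below assume ~/.ssh/config entries of the form
    # (values hypothetical):
    #     Host cntl0
    #         Hostname 192.168.120.10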

    # Get first_controller_node
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/cntl0/ {print $2}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    first_controller_node = p2.communicate()[0].rstrip()

    # Get first_controller_node_ip
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('grep -A1 "cntl0"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    first_controller_node_ip = p3.communicate()[0].rstrip()

    # Get CONTROLLER_NODES_IP
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('grep -A1 "cntl"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    controller_nodes_ip = p3.communicate()[0].split()

    # Get first_compute_node
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(
        shlex.split('awk \'/nova0/ || /compute0/ {print $2}\''),
        stdin=p1.stdout,
        stdout=subprocess.PIPE)
    first_compute_node = p2.communicate()[0].rstrip()

    # Get first_compute_node_ip
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('egrep -A1 -h "nova0|compute0"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    first_compute_node_ip = p3.communicate()[0].rstrip()

    # Get COMPUTE_NODES_IP
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('egrep -A1 -h "nova|compute"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    compute_nodes_ip = p3.communicate()[0].split()

    # Get COMPUTE_NOVA_NAMES
    p1 = subprocess.Popen(['nova', 'list'], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/compute/ {print $4}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    compute_nova_names = p2.communicate()[0].split()

    oc_auth_url, oc_tenant_name, oc_username, oc_password = \
        CredentialHelper.get_overcloud_creds()

    domainname = get_domainname(first_compute_node_ip)

    LOG.setLevel(args.logging_level)

    # Install RA instanceHA Configuration
    if args.compute_node_ip == '' and args.controller_node_ip == '':
        LOG.info("***  Configuring Instance HA for stack {}  ***".format(
            oc_stack_name))

        LOG.debug("home_dir: {}".format(home_dir))
        LOG.debug("oc_stack_name: {}".format(oc_stack_name))
        LOG.debug("oc_auth_url: {}".format(oc_auth_url))
        LOG.debug("oc_username: {}".format(oc_username))
        LOG.debug("oc_password: {}".format(oc_password))
        LOG.debug("oc_tenant_name: {}".format(oc_tenant_name))
        LOG.debug("first_controller_node: {}".format(first_controller_node))
        LOG.debug(
            "first_controller_node_ip: {}".format(first_controller_node_ip))
        LOG.debug("controller_nodes_ip: {}".format(controller_nodes_ip))
        LOG.debug("first_compute_node: {}".format(first_compute_node))
        LOG.debug("first_compute_node_ip: {}".format(first_compute_node_ip))
        LOG.debug("compute_nodes_ip: {}".format(compute_nodes_ip))
        LOG.debug("compute_nova_names: {}".format(compute_nova_names))
        LOG.debug("domainname: {}".format(domainname))

        if (verify_fencing(first_controller_node_ip) != "false"):
            LOG.debug("Stonith is enabled.")
        else:
            LOG.critical("!!! - Error: Fencing must be enabled.")
            LOG.info("Use agent_fencing.sh script to enable fencing.")
            sys.exit(-1)

        stop_disable_openstack_services(compute_nodes_ip)
        create_authkey(first_compute_node_ip)
        distribute_all_authkey(compute_nodes_ip, controller_nodes_ip)
        enable_start_pacemaker(compute_nodes_ip)
        create_nova_evacuate_resource(first_controller_node_ip, domainname)
        confirm_nova_evacuate_resource(first_controller_node_ip)
        tag_controllers_with_osprole(first_controller_node_ip)
        tag_the_control_plane(first_controller_node_ip)
        populate_compute_nodes_resources(first_controller_node_ip, domainname)
        add_compute_nodes_stonith_devices(compute_nodes_ip, undercloud_config,
                                          first_controller_node_ip,
                                          instack_file)
        create_fence_nova_device(first_controller_node_ip, domainname)
        enable_compute_nodes_recovery(first_controller_node_ip)
        create_compute_nodes_resources(compute_nodes_ip,
                                       first_controller_node_ip)
        enable_control_plane_services(first_controller_node_ip)
        final_resource_cleanup(first_controller_node_ip)

    # Execute Compute node addition
    if args.compute_node_ip != '':
        compute_node_ip = args.compute_node_ip.rstrip()
        if check_ip_validity(compute_node_ip):
            LOG.info("***  Adding a compute node {} to InstanceHA"
                     " configuration.".format(compute_node_ip))

            LOG.debug("compute_nodes_ip: {}".format(compute_nodes_ip))
            LOG.debug("compute_node_ip: {}".format(compute_node_ip))
            LOG.debug("first_controller_node_ip: {}".format(
                first_controller_node_ip))
            LOG.debug("undercloud_config: {}".format(undercloud_config))
            LOG.debug("instack_file: {}".format(instack_file))

            stop_disable_openstack_services(compute_nodes_ip)
            distribute_node_authkey(compute_node_ip)
            enable_start_compute_pacemaker(compute_node_ip)
            add_compute_node_stonith_devices(compute_node_ip,
                                             undercloud_config,
                                             first_controller_node_ip,
                                             instack_file)
            create_compute_node_resources(compute_node_ip,
                                          first_controller_node_ip)
            enable_control_plane_services(first_controller_node_ip)
            final_resource_cleanup(first_controller_node_ip)

        else:
            LOG.critical("!!! - Fatal Error: Invalid IP address: {}".format(
                compute_node_ip))
            sys.exit(-1)

    # Execute Controller node addition
    if args.controller_node_ip != '':
        controller_node_ip = args.controller_node_ip.rstrip()
        if check_ip_validity(controller_node_ip):
            LOG.info("***  Adding a controller node {} to InstanceHA"
                     " configuration.".format(controller_node_ip))

            LOG.debug("controller_node_ip: {}".format(controller_node_ip))
            LOG.debug("first_controller_node_ip: {}".format(
                first_controller_node_ip))

            distribute_node_authkey(controller_node_ip)
            tag_controllers_with_osprole(first_controller_node_ip)
            final_resource_cleanup(first_controller_node_ip)

        else:
            LOG.critical("!!! - Fatal Error: Invalid IP address: {}".format(
                controller_node_ip))
            sys.exit(-1)
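
Example invocations of the script above (the script name and addresses are
hypothetical):

# Full Instance HA configuration for the overcloud:
#     python instance_ha.py
# Add a single compute node to the configuration:
#     python instance_ha.py --compute 192.168.120.25
# Add a single controller node, with debug logging:
#     python instance_ha.py --controller 192.168.120.30 -l DEBUG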