def main():
    """Prepare overcloud nodes for deployment.

    Powers off every ironic node, forces its boot device to PXE
    (persistently), and — unless --skip was given — introspects all
    manageable nodes and makes them available.
    """
    args = parse_arguments()
    LoggingHelper.configure_logging(args.logging_level)

    # stackrc is sourced in the same shell invocation so the openstack
    # client is authenticated against the undercloud.
    cmd = "source ~/stackrc;openstack baremetal node list -f value -c UUID"
    nodes = subprocess.check_output(cmd,
                                    stderr=subprocess.STDOUT,
                                    shell=True)

    # NOTE(review): under Python 3 check_output returns bytes, which would
    # break the str concatenation below — presumably this script runs under
    # Python 2; confirm before porting.
    for node in nodes.splitlines():
        # Skip blank lines in the command output
        if len(node) < 1:
            continue

        # Power off the node
        logger.info("Powering off node " + node)
        cmd = "openstack baremetal node power off " + node
        logger.debug(" {}".format(cmd))
        os.system(cmd)

        # Set the first boot device to PXE
        logger.info("Setting the provisioning NIC to PXE boot on node " + node)
        cmd = "openstack baremetal node boot device set " + node + " pxe"
        logger.debug(" {}".format(cmd))
        os.system(cmd)

        # Make the PXE boot-device selection persist across reboots
        cmd = "openstack baremetal node set --driver-info " + \
              "force_persistent_boot_device=True " + node
        logger.debug(" {}".format(cmd))
        os.system(cmd)

    if not args.skip:
        # Introspect every manageable node and move it to "available"
        cmd = "openstack overcloud node introspect --all-manageable --provide"
        logger.info("Assigning the kernel and ramdisk image to all nodes")
        logger.debug(cmd)
        os.system(cmd)
def main():
    """Collect and return the NFV parameters for an edge site.

    Parses the command line, builds a ConfigEdge helper for the named
    overcloud/edge site, and returns the gathered NFV parameters encoded
    as a JSON string.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--overcloud_name",
                        default=None,
                        help="The name of the overcloud")
    parser.add_argument("--edge_site",
                        default=None,
                        dest="node_type",
                        help="The name of edge site being configured")
    parser.add_argument("--edge_site_data",
                        default=None,
                        dest="node_type_data",
                        help="The edge site metadata")
    parser.add_argument("--debug",
                        default=False,
                        action='store_true',
                        help="Turn on debugging for this script")
    LoggingHelper.add_argument(parser)
    args = parser.parse_args()
    LoggingHelper.configure_logging(args.logging_level)
    config_edge = ConfigEdge(args.overcloud_name,
                             args.node_type,
                             args.node_type_data)
    params = config_edge.fetch_nfv_parameters()
    logger.debug(">>>>>> nfv parameters {}".format(str(params)))
    # NOTE(review): a main() that returns a value is unusual — presumably
    # the caller prints this JSON; confirm at the call site.
    return json.dumps(params)
def main():
    """Prepare nodes of a given node type for PXE provisioning.

    Lists all ironic nodes with their properties, filters to the requested
    --node_type (or all nodes when no type is given), then powers each
    matching node off and forces persistent PXE boot.
    """
    args = parse_arguments()
    LoggingHelper.configure_logging(args.logging_level)

    cmd = "openstack baremetal node list --fields uuid properties -f json"
    nodes = json.loads(
        subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True))

    for node in nodes:
        props = node["Properties"]
        # node_type is an optional ironic property; missing means untyped.
        _node_type = props["node_type"] if "node_type" in props else None
        # Match everything when no filter given; otherwise require an exact
        # node_type match.
        match = ((not args.node_type) or
                 (bool(_node_type) and args.node_type == _node_type))
        if (not match):
            continue
        uuid = node["UUID"]

        # Power off the node
        logger.info("Powering off node " + uuid)
        cmd = "openstack baremetal node power off " + uuid
        logger.debug(" {}".format(cmd))
        os.system(cmd)

        # Set the first boot device to PXE
        logger.info("Setting the provisioning NIC to PXE boot on node %s ",
                    uuid)
        cmd = ("openstack baremetal node boot device set --persistent " +
               uuid + " pxe")
        logger.debug(" {}".format(cmd))
        os.system(cmd)

        # Also record the persistent-boot preference in the driver info
        cmd = ("openstack baremetal node set --driver-info "
               "force_persistent_boot_device=True " + uuid)
        logger.debug(" {}".format(cmd))
        os.system(cmd)
def parse_arguments():
    """Parse the command line for single-iDRAC initial configuration."""
    idrac_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Performs initial configuration of an iDRAC.")
    ArgHelper.add_ip_service_tag(idrac_parser)
    idrac_parser.add_argument(
        "-p", "--pxe-nic",
        metavar="FQDD",
        help="""fully qualified device descriptor (FQDD) of network interface to PXE boot from""")
    idrac_parser.add_argument(
        "-c", "--change-password",
        help="The new password for the root user")
    idrac_parser.add_argument(
        "-i", "--skip-nic-config",
        action='store_true',
        help="Use to skip NIC configuration")
    ArgHelper.add_instack_arg(idrac_parser)
    ArgHelper.add_model_properties_arg(idrac_parser)
    LoggingHelper.add_argument(idrac_parser)
    return idrac_parser.parse_args()
def parse_arguments():
    """Parse the command line for the node-preparation script."""
    prep_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Prepares the overcloud nodes.")
    LoggingHelper.add_argument(prep_parser)
    return prep_parser.parse_args()
def main():
    """Introspect every overcloud node matching the requested node type."""
    cli_args = parse_arguments()
    LoggingHelper.configure_logging(cli_args.logging_level)
    client = IronicHelper.get_ironic_client()
    selected = get_nodes(client, cli_args.node_type)
    introspect_nodes(cli_args.in_band, client, selected)
def main():
    """Introspect a single overcloud node identified by IP or service tag."""
    cli_args = parse_arguments()
    LoggingHelper.configure_logging(cli_args.logging_level)
    client = IronicHelper.get_ironic_client()
    target = IronicHelper.get_ironic_node(client, cli_args.ip_service_tag)
    introspect_nodes.introspect_nodes(cli_args.in_band, client, [target])
def main():
    """Import nodes into ironic and stamp each with identifying properties.

    Runs "openstack baremetal import" on the node definition file, then
    patches every imported node with its service tag and, when present,
    its model and provisioning MAC, so later tooling can match ironic
    nodes to physical hardware.  Exits non-zero on any failure.
    """
    args = parse_arguments()
    LoggingHelper.configure_logging(args.logging_level)

    # Load the nodes into ironic
    logger.info("Importing {} into ironic".format(args.node_definition))
    cmd = ["openstack", "baremetal", "import", "--json", args.node_definition]
    # FIX: the second value returned here is the command's stdout; it was
    # previously bound to a local misleadingly named "stdin".
    exit_code, stdout, stderr = Exec.execute_command(cmd)
    if exit_code != 0:
        logger.error("Failed to import nodes into ironic: {}, {}".format(
            stdout, stderr))
        sys.exit(1)

    # Load the instack file
    try:
        json_file = os.path.expanduser(args.node_definition)
        with open(json_file, 'r') as instackenv_json:
            instackenv = json.load(instackenv_json)
    except (IOError, ValueError):
        logger.exception("Failed to load node definition file {}".format(
            args.node_definition))
        sys.exit(1)

    nodes = instackenv["nodes"]

    # Create the client once, instead of rebuilding it for every node.
    ironic_client = IronicHelper.get_ironic_client()

    # Loop thru the nodes
    for node in nodes:
        # Find the node in ironic
        ironic_node = IronicHelper.get_ironic_node(ironic_client,
                                                   node["pm_addr"])

        # Set the model and service tag on the node
        logger.info("Setting model ({}), service tag ({}), and provisioning "
                    "MAC ({}) on {}".format(
                        node["model"] if "model" in node else "None",
                        node["service_tag"],
                        node["provisioning_mac"]
                        if "provisioning_mac" in node else "None",
                        node["pm_addr"]))
        patch = [{'op': 'add',
                  'value': node["service_tag"],
                  'path': '/properties/service_tag'}]
        if "model" in node:
            patch.append({'op': 'add',
                          'value': node["model"],
                          'path': '/properties/model'})
        if "provisioning_mac" in node:
            patch.append({'op': 'add',
                          'value': node["provisioning_mac"],
                          'path': '/properties/provisioning_mac'})
        ironic_client.node.update(ironic_node.uuid, patch)
def parse_arguments():
    """Parse the command line for single-node introspection."""
    introspect_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Introspects a specified overcloud node.")
    ArgHelper.add_ip_service_tag(introspect_parser)
    ArgHelper.add_inband_arg(introspect_parser)
    LoggingHelper.add_argument(introspect_parser)
    return introspect_parser.parse_args()
def parse_arguments():
    """Parse the command line for the node-import script."""
    import_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Loads nodes into ironic.")
    ArgHelper.add_instack_arg(import_parser)
    LoggingHelper.add_argument(import_parser)
    return import_parser.parse_args()
def parse_arguments():
    """Parse the command line, including the optional -s/--skip flag."""
    prep_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Prepares the overcloud nodes.")
    LoggingHelper.add_argument(prep_parser)
    prep_parser.add_argument(
        '-s', '--skip',
        action='store_true',
        default=False,
        help="Skip assigning the kernel and ramdisk images to "
             "all nodes")
    return prep_parser.parse_args()
def parse_arguments():
    """Parse the command line, including the optional -n/--node_type filter."""
    prep_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Prepares the overcloud nodes.")
    LoggingHelper.add_argument(prep_parser)
    prep_parser.add_argument(
        "-n", "--node_type",
        default=None,
        help="""Prepare nodes for this specific node type only""")
    return prep_parser.parse_args()
def parse_arguments(dashboard_user):
    """Parse the input arguments.

    dashboard_user is accepted for interface parity with the sibling
    dashboard scripts; this variant does not interpolate it into any
    help text.
    """
    cli = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Configures the Ceph Storage Dashboard and Ceph nodes.")
    cli.add_argument("-dashboard_addr", "--dashboard_addr",
                     required=True,
                     help="The IP address of the Ceph Storage Dashboard "
                          "on the external network")
    cli.add_argument("-dashboard_pass", "--dashboard_pass",
                     required=True,
                     help="The password of the Ceph Storage Dashboard "
                          "node ")
    cli.add_argument("-subUser", "--subUser",
                     action='store',
                     required=False,
                     help="The username for Red Hat Subscription Access")
    cli.add_argument("-subPass", "--subPass",
                     action='store',
                     required=False,
                     help="The password for Red Hat Subscription Access")
    cli.add_argument("-satOrg", "--satOrg",
                     action='store',
                     required=False,
                     help="The Red Hat Satellite Organization")
    cli.add_argument("-satKey", "--satKey",
                     action='store',
                     required=False,
                     help="The Red Hat Satellite Activation Key")
    cli.add_argument("-physId", "--physId",
                     required=True,
                     help="The subscription poolid for Physical Nodes")
    cli.add_argument("-cephId", "--cephId",
                     required=True,
                     help="The subscription poolid for Ceph Nodes")
    LoggingHelper.add_argument(cli)
    return cli.parse_args()
def parse_arguments(sah_user):
    """Parse the command line for the SAH DHCP configuration script."""
    cli = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Configures DHCP server on SAH node for use by iDRACs.")
    cli.add_argument(
        "sah_ip",
        help="""The IP address of the SAH node on the provisioning network""")
    cli.add_argument(
        "-p", "--password",
        help="The {} password of the SAH node".format(sah_user))
    LoggingHelper.add_argument(cli)
    return cli.parse_args()
def parse_arguments():
    """Parse the command line for bulk iDRAC configuration.

    Returns the argparse namespace.  The optional -j/--json_config value
    is a JSON string mapping each node to its PXE NIC FQDD and new
    password.
    """
    parser = argparse.ArgumentParser(
        description="Performs initial configuration of iDRACs.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ArgHelper.add_instack_arg(parser)
    ArgHelper.add_model_properties_arg(parser)
    LoggingHelper.add_argument(parser)
    # FIX: the original help text embedded stray quote characters
    # ('... and the " "new password ...') that appeared verbatim in
    # --help output.
    parser.add_argument("-j", "--json_config",
                        default=None,
                        help="JSON that specifies the PXE NIC FQDD and the "
                             "new password for each overcloud node")
    return parser.parse_args()
def main():
    """Configure a single iDRAC from the command-line arguments.

    Loads the model properties and delegates to config_idrac(); any error
    is logged and converted to a non-zero exit status.
    """
    args = parse_arguments()
    LoggingHelper.configure_logging(args.logging_level)

    try:
        model_properties = Utils.get_model_properties(args.model_properties)
        # First argument (the instack lock) is None: no other threads are
        # sharing the instack file in this single-node path.
        config_idrac(None,
                     args.ip_service_tag,
                     args.node_definition,
                     model_properties,
                     args.pxe_nic,
                     args.change_password)
    except ValueError as ex:
        # NOTE(review): ex.message is Python 2 only — confirm this script
        # is not expected to run under Python 3.
        LOG.error("An error occurred while configuring iDRAC {}: {}".format(
            args.ip_service_tag, ex.message))
        sys.exit(1)
    except Exception as ex:
        LOG.exception("An error occurred while configuring iDRAC {}: "
                      "{}".format(args.ip_service_tag, ex.message))
        sys.exit(1)
def main():
    """Report whether an iDRAC is ready to process commands.

    Exit status: 0 = ready, 1 = not ready, 2 = an exception occurred
    (e.g. node not found or iDRAC unreachable).
    """
    parser = argparse.ArgumentParser(
        description="Queries an iDRAC to determine if it is ready to process "
                    "commands.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    ArgHelper.add_ip_service_tag(parser)
    ArgHelper.add_instack_arg(parser)
    LoggingHelper.add_argument(parser)
    args = parser.parse_args()
    LoggingHelper.configure_logging(args.logging_level)

    ip_service_tag = args.ip_service_tag
    node_definition = args.node_definition
    return_code = 0
    try:
        # Resolve the node entry (and its iDRAC credentials) from the
        # instack file.
        node = CredentialHelper.get_node_from_instack(ip_service_tag,
                                                      node_definition)
        if not node:
            raise ValueError("Unable to find {} in {}".format(
                ip_service_tag, node_definition))
        drac_ip = node["pm_addr"]
        drac_user = node["pm_user"]
        drac_password = node["pm_password"]
        drac_client = DRACClient(drac_ip, drac_user, drac_password)

        ready = drac_client.is_idrac_ready()
        if ready:
            LOG.info("iDRAC is ready")
        else:
            return_code = 1
            LOG.info("iDRAC is NOT ready")
    # Bare except is deliberate here: any failure at all must map to
    # exit code 2 for the calling script.
    except:  # noqa: E501
        LOG.exception("An exception occurred:")
        return_code = 2

    sys.exit(return_code)
def main():
    """Prepare all overcloud nodes for provisioning via IPMI.

    Powers off each ironic node and sets its boot device to PXE
    (persistent) using ipmitool, then — unless --skip was given — assigns
    the kernel and ramdisk images to all nodes.
    """
    args = parse_arguments()
    LoggingHelper.configure_logging(args.logging_level)

    ironic_client = IronicHelper.get_ironic_client()

    for node in ironic_client.node.list(detail=True):
        ip, username, password = \
            CredentialHelper.get_drac_creds_from_node(node)

        # Power off the node
        cmd = "ipmitool -H {} -I lanplus -U {} -P '{}' chassis " \
              "power off".format(ip, username, password)
        logger.info("Powering off {}".format(ip))
        logger.debug(" {}".format(cmd))
        os.system(cmd)

        # Set the first boot device to PXE
        cmd = "ipmitool -H {} -I lanplus -U {} -P '{}' chassis " \
              "bootdev pxe options=persistent".format(ip, username, password)
        logger.info(
            "Setting the provisioning NIC to PXE boot on {}".format(ip))
        logger.debug(" {}".format(cmd))
        os.system(cmd)

    if not args.skip:
        # Configure boot explicitly with undercloud credentials so the
        # command works without a sourced stackrc.
        os_auth_url, os_tenant_name, os_username, os_password = \
            CredentialHelper.get_undercloud_creds()
        cmd = "openstack baremetal configure boot " \
              "--os-auth-url {} " \
              "--os-project-name {} " \
              "--os-username {} " \
              "--os-password {} " \
              "".format(os_auth_url, os_tenant_name, os_username, os_password)
        logger.info("Assigning the kernel and ramdisk image to all nodes")
        logger.debug(cmd)
        os.system(cmd)
def parse_arguments(rhscon_user):
    """Parse the command line for Storage Console configuration."""
    cli = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Configures the Storage Console and overcloud Ceph nodes.")
    cli.add_argument("rhscon_addr",
                     metavar="ADDR",
                     help="The IP address of the Storage Console on the"
                          " external network")
    cli.add_argument("rhscon_pass",
                     metavar="PASSWORD",
                     help="The {} password of the Storage Console".format(
                         rhscon_user))
    LoggingHelper.add_argument(cli)
    return cli.parse_args()
def parse_arguments(dashboard_user):
    """Parse the positional dashboard address/password arguments."""
    cli = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Configures the Ceph Storage Dashboard and Ceph nodes.")
    cli.add_argument("dashboard_addr",
                     metavar="ADDR",
                     help="The IP address of the Ceph Storage Dashboard "
                          "on the external network")
    cli.add_argument("dashboard_pass",
                     metavar="PASSWORD",
                     help="The {} password of the Ceph Storage "
                          "Dashboard".format(dashboard_user))
    LoggingHelper.add_argument(cli)
    return cli.parse_args()
def parse_arguments(dashboard_user):
    """Parse the positional command-line arguments.

    dashboard_user is accepted for interface parity with the sibling
    scripts; this variant does not use it in any help text.
    """
    cli = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Configures the Ceph Storage Dashboard and Ceph nodes.")
    cli.add_argument("dashboard_addr",
                     metavar="ADDR",
                     help="The IP address of the Ceph Storage Dashboard "
                          "on the external network")
    cli.add_argument("dashboard_pass",
                     metavar="PASSWORD",
                     help="The password of the Ceph Storage Dashboard "
                          "node ")
    cli.add_argument("subUser",
                     metavar="SUBSCRIPTION_USER",
                     help="The username for Red Hat Subscription Access"
                          " ")
    cli.add_argument("subPass",
                     metavar="SUBSCRIPTION_PASSWORD",
                     help="The password for Red Hat Subscription Access"
                          " ")
    cli.add_argument("physId",
                     metavar="PHYSICAL_POOL_ID",
                     help="The subscription poolid for Physical Nodes"
                          " ")
    cli.add_argument("cephId",
                     metavar="CEPH_POOL_ID",
                     help="The subscription poolid for Ceph Nodes"
                          " ")
    LoggingHelper.add_argument(cli)
    return cli.parse_args()
def main():
    """Configure the iDRAC of every node in the instackenv, in parallel.

    Reads the node definition file, optionally merges per-node PXE NIC and
    password overrides from the -j/--json_config JSON string, then spawns
    one thread per node to run config_idrac.config_idrac().  Exits with a
    non-zero status if any node fails.
    """
    args = parse_arguments()
    LoggingHelper.configure_logging(args.logging_level)

    # Load the instackenv node definition file
    try:
        json_file = os.path.expanduser(args.node_definition)
        with open(json_file, 'r') as instackenv_json:
            instackenv = json.load(instackenv_json)
    except (IOError, ValueError):
        LOG.exception("Failed to load node definition file {}".format(
            args.node_definition))
        sys.exit(1)

    # Parse the optional per-node configuration overrides
    json_config = None
    if args.json_config is not None:
        try:
            json_config = json.loads(args.json_config)
        # NOTE(review): bare except also swallows SystemExit and
        # KeyboardInterrupt; ValueError would be sufficient here.
        except:
            LOG.exception("Failed to parse json_config data")
            sys.exit(1)

    try:
        model_properties = Utils.get_model_properties(args.model_properties)

        # Configure all the nodes
        if "nodes" not in instackenv:
            raise ValueError("{} must contain an array of "
                             "\"nodes\"".format(args.node_definition))

        # Lock serializing the worker threads' access to the instack file
        instack_lock = threading.Lock()

        threads = []
        for node in instackenv["nodes"]:
            # Overrides may be keyed by either the iDRAC address or the
            # service tag; pm_addr wins when both are present.
            pxe_nic = None
            password = None
            if json_config is not None:
                node_config = None
                if node["pm_addr"] in json_config.keys():
                    node_config = json_config[node["pm_addr"]]
                elif node["service_tag"] in json_config.keys():
                    node_config = json_config[node["service_tag"]]

                if node_config is not None:
                    if "pxe_nic" in node_config.keys():
                        pxe_nic = node_config["pxe_nic"]

                    if "password" in node_config.keys():
                        password = node_config["password"]

            # One worker per node; any exception raised by the worker is
            # captured on the thread object for inspection after join().
            thread = ThreadWithExHandling(LOG,
                                          target=config_idrac.config_idrac,
                                          args=(instack_lock,
                                                node["pm_addr"],
                                                args.node_definition,
                                                model_properties,
                                                pxe_nic,
                                                password))
            threads.append(thread)
            thread.start()

        for thread in threads:
            thread.join()

        # Tally the failures recorded by the worker threads
        failed_threads = 0
        for thread in threads:
            if thread.ex is not None:
                failed_threads += 1

        if failed_threads == 0:
            LOG.info("Successfully configured all iDRACs")
        else:
            LOG.info("Failed to configure {} out of {} iDRACs".format(
                failed_threads, len(threads)))
            sys.exit(1)
    except ValueError as ex:
        LOG.error(ex)
        sys.exit(1)
    except Exception as ex:
        # NOTE(review): ex.message is Python 2 only — confirm the intended
        # runtime before porting to Python 3.
        LOG.exception(ex.message)
        sys.exit(1)
def main(): try: global args parser = argparse.ArgumentParser() parser.add_argument("--controllers", dest="num_controllers", type=int, default=3, help="The number of controller nodes") parser.add_argument("--dell-computes", dest="num_dell_computes", type=int, required=True, help="The number of dell compute nodes") parser.add_argument("--storage", dest="num_storage", type=int, required=True, help="The number of storage nodes") parser.add_argument("--enable_hugepages", action='store_true', default=False, help="Enable/Disable hugepages feature") parser.add_argument("--enable_numa", action='store_true', default=False, help="Enable/Disable numa feature") parser.add_argument("--vlans", dest="vlan_range", required=True, help="The VLAN range to use for Neutron in " " xxx:yyy format") parser.add_argument("--nic_env_file", default="5_port/nic_environment.yaml", help="The NIC environment file to use") parser.add_argument("--ntp", dest="ntp_server_fqdn", default="0.centos.pool.ntp.org", help="The FQDN of the ntp server to use") parser.add_argument("--timeout", default="120", help="The amount of time in minutes to allow the " "overcloud to deploy") parser.add_argument("--overcloud_name", default=None, help="The name of the overcloud") parser.add_argument("--hugepages_size", dest="hugepages_size", required=False, default="1GB", help="HugePages size") parser.add_argument("--hostos_cpu_count", dest="hostos_cpu_count", required=False, default="4", help="HostOs Cpus to be configured") parser.add_argument("--mariadb_max_connections", dest="mariadb_max_connections", required=False, default="15360", help="Maximum number of connections for MariaDB") parser.add_argument("--innodb_buffer_pool_size", dest="innodb_buffer_pool_size", required=False, default="dynamic", help="InnoDB buffer pool size") parser.add_argument("--innodb_buffer_pool_instances", dest="innodb_buffer_pool_instances", required=False, default="16", help="InnoDB buffer pool instances.") 
parser.add_argument('--enable_dellsc', action='store_true', default=False, help="Enable cinder Dell Storage Center backend") parser.add_argument('--disable_rbd', action='store_true', default=False, help="Disable cinder Ceph and rbd backend") parser.add_argument('--dvr_enable', action='store_true', default=False, help="Enables Distributed Virtual Routing") parser.add_argument('--static_ips', action='store_true', default=False, help="Specify the IPs on the overcloud nodes") parser.add_argument('--static_vips', action='store_true', default=False, help="Specify the VIPs for the networks") parser.add_argument('--ovs_dpdk', action='store_true', default=False, help="Enable OVS+DPDK") parser.add_argument('--sriov', action='store_true', default=False, help="Enable SR-IOV") parser.add_argument('--node_placement', action='store_true', default=False, help="Control which physical server is assigned " "which instance") parser.add_argument("--debug", default=False, action='store_true', help="Indicates if the deploy-overcloud script " "should be run in debug mode") parser.add_argument("--mtu", dest="mtu", type=int, required=True, default=1500, help="Tenant Network MTU") LoggingHelper.add_argument(parser) args = parser.parse_args() LoggingHelper.configure_logging(args.logging_level) p = re.compile('\d+:\d+') # noqa: W605 if not p.match(args.vlan_range): raise ValueError("Error: The VLAN range must be a number followed " "by a colon, followed by another number") os_auth_url, os_tenant_name, os_username, os_password, \ os_user_domain_name, os_project_domain_name = \ CredentialHelper.get_undercloud_creds() # Set up the default flavors control_flavor = "control" ceph_storage_flavor = "ceph-storage" swift_storage_flavor = "swift-storage" block_storage_flavor = "block-storage" if args.node_placement: validate_node_placement() # If node-placement is specified, then the baremetal flavor must # be used control_flavor = BAREMETAL_FLAVOR ceph_storage_flavor = BAREMETAL_FLAVOR 
swift_storage_flavor = BAREMETAL_FLAVOR block_storage_flavor = BAREMETAL_FLAVOR # Validate that the NIC envronment file exists nic_env_file = os.path.join(home_dir, "pilot/templates/nic-configs", args.nic_env_file) if not os.path.isfile(nic_env_file): raise ValueError("\nError: The nic_env_file {} does not " "exist!".format(nic_env_file)) # Apply any patches required on the Director itself. This is done each # time the overcloud is deployed (instead of once, after the Director # is installed) in order to ensure an update to the Director doesn't # overwrite the patch. # logger.info("Applying patches to director...") # cmd = os.path.join(home_dir, 'pilot', 'patch-director.sh') # status = os.system(cmd) # if status != 0: # raise ValueError("\nError: {} failed, unable to continue. See " # "the comments in that file for additional " # "information".format(cmd)) # Pass the parameters required by puppet which will be used # to enable/disable dell nfv features # Edit the dellnfv_environment.yaml # If disabled, default values will be set and # they won't be used for configuration # Create ConfigOvercloud object config = ConfigOvercloud(args.overcloud_name) # Remove this when Numa siblings added # Edit the dellnfv_environment.yaml config.edit_environment_files( args.mtu, args.enable_hugepages, args.enable_numa, args.hugepages_size, args.hostos_cpu_count, args.ovs_dpdk, args.sriov, nic_env_file, args.mariadb_max_connections, args.innodb_buffer_pool_size, args.innodb_buffer_pool_instances, args.num_controllers, args.num_storage, control_flavor, ceph_storage_flavor, swift_storage_flavor, block_storage_flavor, args.vlan_range, args.num_dell_computes) # Launch the deployment overcloud_name_opt = "" if args.overcloud_name is not None: overcloud_name_opt = "--stack " + args.overcloud_name debug = "" if args.debug: debug = "--debug" # The order of the environment files is important as a later inclusion # overrides resources defined in prior inclusions. 
# The roles_data.yaml must be included at the beginning. # This is needed to enable the custome role Dell Compute. # It overrides the default roles_data.yaml env_opts = "-r ~/pilot/templates/roles_data.yaml" # The network-environment.yaml must be included after the # network-isolation.yaml env_opts += " -e ~/pilot/templates/overcloud/environments/" \ "network-isolation.yaml" \ " -e ~/pilot/templates/network-environment.yaml" \ " -e {}" \ " -e ~/pilot/templates/ceph-osd-config.yaml" \ "".format(nic_env_file) # The static-ip-environment.yaml must be included after the # network-environment.yaml if args.static_ips: env_opts += " -e ~/pilot/templates/static-ip-environment.yaml" # The static-vip-environment.yaml must be included after the # network-environment.yaml if args.static_vips: env_opts += " -e ~/pilot/templates/static-vip-environment.yaml" # The neutron-ovs-dvr.yaml.yaml must be included after the # network-environment.yaml if args.dvr_enable: env_opts += " -e ~/pilot/templates/neutron-ovs-dvr.yaml" if args.node_placement: env_opts += " -e ~/pilot/templates/node-placement.yaml" # The dell-environment.yaml must be included after the # storage-environment.yaml and ceph-radosgw.yaml env_opts += " -e ~/pilot/templates/overcloud/environments/" \ "storage-environment.yaml" \ " -e ~/overcloud_images.yaml" \ " -e ~/pilot/templates/dell-environment.yaml" \ " -e ~/pilot/templates/overcloud/environments/" \ "puppet-pacemaker.yaml" host_config = False if args.enable_hugepages or args.enable_numa: env_opts += " -e ~/pilot/templates/overcloud/environments/" \ "host-config-and-reboot.yaml" host_config = True if args.ovs_dpdk: if not args.enable_hugepages or not args.enable_numa: raise ValueError("Both hugepages and numa must be" + "enabled in order to use OVS-DPDK") else: env_opts += " -e ~/pilot/templates/neutron-ovs-dpdk.yaml" if args.sriov: env_opts += " -e ~/pilot/templates/neutron-sriov.yaml" env_opts += " -e ~/pilot/templates/ovs-hw-offload.yaml" if not host_config: 
env_opts += " -e ~/pilot/templates/overcloud/environments/" \ "host-config-and-reboot.yaml" if args.enable_dellsc: env_opts += " -e ~/pilot/templates/dell-cinder-backends.yaml" cmd = "cd ;source ~/stackrc; openstack overcloud deploy" \ " {}" \ " --log-file ~/pilot/overcloud_deployment.log" \ " -t {}" \ " {}" \ " --templates ~/pilot/templates/overcloud" \ " -e /usr/share/openstack-tripleo-heat-templates/" \ "environments/ceph-ansible/ceph-ansible.yaml" \ " -e /usr/share/openstack-tripleo-heat-templates/" \ "environments/ceph-ansible/ceph-rgw.yaml" \ " {}" \ " --libvirt-type kvm" \ " --ntp-server {}" \ "".format(debug, args.timeout, overcloud_name_opt, env_opts, args.ntp_server_fqdn, ) with open(os.path.join(home_dir, 'pilot', 'overcloud_deploy_cmd.log'), 'w') as f: f.write(cmd.replace(' -', ' \\\n -')) f.write('\n') print cmd start = time.time() status = run_deploy_command(cmd) end = time.time() logger.info('\nExecution time: {} (hh:mm:ss)'.format( time.strftime('%H:%M:%S', time.gmtime(end - start)))) logger.info('Fetching SSH keys...') update_ssh_config() if status == 0: horizon_url = finalize_overcloud() logger.info("\nDeployment Completed") else: horizon_url = None logger.info('Overcloud nodes:') identify_nodes() if horizon_url: logger.info('\nHorizon Dashboard URL: {}\n'.format(horizon_url)) except Exception as err: print >> sys.stderr, err sys.exit(1)
def main():
    """Deploy the overcloud.

    Parses the deployment arguments, validates the VLAN range and NIC
    environment file, updates the Dell NFV environment files, assembles
    the ordered list of Heat environment files (order matters: a later
    inclusion overrides resources defined in prior inclusions) and runs
    "openstack overcloud deploy", logging timing and the Horizon URL.

    Raises:
        ValueError: on an invalid VLAN range, a missing NIC environment
            file, or an inconsistent feature combination (OVS-DPDK
            without hugepages/NUMA, SR-IOV without NUMA).
    """
    try:
        global args
        parser = argparse.ArgumentParser()
        parser.add_argument("--controllers", dest="num_controllers",
                            type=int, default=3,
                            help="The number of controller nodes")
        parser.add_argument("--dell-computes", dest="num_dell_computes",
                            type=int, required=True,
                            help="The number of dell compute nodes")
        parser.add_argument("--dell-computeshci", dest="num_dell_computeshci",
                            type=int, required=True,
                            help="The number of dell hci compute nodes")
        parser.add_argument("--storage", dest="num_storage",
                            type=int, required=True,
                            help="The number of storage nodes")
        parser.add_argument("--powerflex", dest="num_powerflex",
                            type=int, required=True,
                            help="The number of powerflex storage nodes")
        parser.add_argument("--enable_hugepages", action='store_true',
                            default=False,
                            help="Enable/Disable hugepages feature")
        parser.add_argument("--enable_numa", action='store_true',
                            default=False,
                            help="Enable/Disable numa feature")
        parser.add_argument("--vlans", dest="vlan_range", required=True,
                            help="The VLAN range to use for Neutron in "
                                 " xxx:yyy format")
        parser.add_argument("--nic_env_file",
                            default="5_port/nic_environment.yaml",
                            help="The NIC environment file to use")
        parser.add_argument("--ntp", dest="ntp_server_fqdn",
                            default="0.centos.pool.ntp.org",
                            help="The FQDN of the ntp server to use")
        parser.add_argument("--timezone", dest="time_zone",
                            default="America/Chicago",
                            help="The timezone to use")
        parser.add_argument("--timeout", default="300",
                            help="The amount of time in minutes to allow the "
                                 "overcloud to deploy")
        parser.add_argument("--overcloud_name", default=None,
                            help="The name of the overcloud")
        parser.add_argument("--hugepages_size", dest="hugepages_size",
                            required=False, default="1GB",
                            help="HugePages size")
        parser.add_argument("--hostos_cpu_count", dest="hostos_cpu_count",
                            required=False, default="4",
                            help="HostOs Cpus to be configured")
        parser.add_argument('--enable_dellsc', action='store_true',
                            default=False,
                            help="Enable cinder Dell Storage Center backend")
        parser.add_argument('--enable_unity', action='store_true',
                            default=False,
                            help="Enable Dell EMC Unity backend")
        parser.add_argument('--enable_unity_manila', action='store_true',
                            default=False,
                            help="Enable Dell EMC Unity Manila backend")
        parser.add_argument('--enable_powermax', action='store_true',
                            default=False,
                            help="Enable Dell EMC Powermax backend")
        parser.add_argument('--powermax_protocol', dest='powermax_protocol',
                            required=False, default="iSCSI",
                            help="Dell EMC Powermax Protocol - iSCSI or FC")
        parser.add_argument('--enable_powermax_manila', action='store_true',
                            default=False,
                            help="Enable Dell EMC PowerMax Manila backend")
        parser.add_argument('--enable_powerstore', action='store_true',
                            default=False,
                            help="Enable Dell EMC Powerstore backend")
        parser.add_argument('--disable_rbd', action='store_true',
                            default=False,
                            help="Disable cinder Ceph and rbd backend")
        parser.add_argument('--octavia_enable', action='store_true',
                            default=False,
                            help="Enables Octavia Load Balancer")
        parser.add_argument('--octavia_user_certs_keys', action='store_true',
                            default=False,
                            help="Enables Octavia Load Balancer with "
                                 "user provided certs and keys")
        parser.add_argument('--dvr_enable', action='store_true',
                            default=False,
                            help="Enables Distributed Virtual Routing")
        parser.add_argument('--barbican_enable', action='store_true',
                            default=False,
                            help="Enables Barbican key manager")
        parser.add_argument('--static_ips', action='store_true',
                            default=False,
                            help="Specify the IPs on the overcloud nodes")
        parser.add_argument('--static_vips', action='store_true',
                            default=False,
                            help="Specify the VIPs for the networks")
        parser.add_argument('--ovs_dpdk', action='store_true',
                            default=False,
                            help="Enable OVS+DPDK")
        parser.add_argument('--sriov', action='store_true',
                            default=False,
                            help="Enable SR-IOV")
        parser.add_argument('--hw_offload', action='store_true',
                            default=False,
                            help="Enable SR-IOV Offload")
        parser.add_argument('--sriov_interfaces', dest="sriov_interfaces",
                            default=False,
                            help="SR-IOV interfaces count")
        parser.add_argument('--node_placement', action='store_true',
                            default=False,
                            help="Control which physical server is assigned "
                                 "which instance")
        parser.add_argument("--debug", default=False, action='store_true',
                            help="Indicates if the deploy-overcloud script "
                                 "should be run in debug mode")
        parser.add_argument("--mtu", dest="mtu", type=int, required=True,
                            default=1500,
                            help="Tenant Network MTU")
        parser.add_argument("--dashboard_enable", action='store_true',
                            default=False,
                            help="Enable the ceph dashboard deployment")
        parser.add_argument('--network_data', action='store_true',
                            default=False,
                            help="Use network_data.yaml to create edge site "
                                 "networks")
        LoggingHelper.add_argument(parser)
        args = parser.parse_args()
        LoggingHelper.configure_logging(args.logging_level)

        # Raw string so "\d" is a regex escape, not a (deprecated)
        # string escape (fixes W605).
        p = re.compile(r'\d+:\d+')
        if not p.match(args.vlan_range):
            raise ValueError("Error: The VLAN range must be a number followed "
                             "by a colon, followed by another number")

        # Called for its side effect (fails fast when undercloud
        # credentials are unavailable); the returned values are unused
        # in this function.
        CredentialHelper.get_undercloud_creds()

        # Set up the default flavors
        control_flavor = "control"
        ceph_storage_flavor = "ceph-storage"
        swift_storage_flavor = "swift-storage"
        block_storage_flavor = "block-storage"

        if args.node_placement:
            validate_node_placement()

            # If node-placement is specified, then the baremetal flavor must
            # be used
            control_flavor = BAREMETAL_FLAVOR
            ceph_storage_flavor = BAREMETAL_FLAVOR
            swift_storage_flavor = BAREMETAL_FLAVOR
            block_storage_flavor = BAREMETAL_FLAVOR

        # Validate that the NIC environment file exists
        nic_env_file = os.path.join(home_dir,
                                    "pilot/templates/nic-configs",
                                    args.nic_env_file)
        if not os.path.isfile(nic_env_file):
            raise ValueError("\nError: The nic_env_file {} does not "
                             "exist!".format(nic_env_file))

        # Apply any patches required on the Director itself. This is done
        # each time the overcloud is deployed (instead of once, after the
        # Director is installed) in order to ensure an update to the
        # Director doesn't overwrite the patch.
        # logger.info("Applying patches to director...")
        # cmd = os.path.join(home_dir, 'pilot', 'patch-director.sh')
        # status = os.system(cmd)
        # if status != 0:
        #     raise ValueError("\nError: {} failed, unable to continue. See "
        #                      "the comments in that file for additional "
        #                      "information".format(cmd))

        # Pass the parameters required by puppet which will be used
        # to enable/disable dell nfv features.
        # Edit the dellnfv_environment.yaml. If disabled, default values
        # will be set and they won't be used for configuration.
        print("Configure environment file")
        config = ConfigOvercloud(args.overcloud_name)
        # Remove this when Numa siblings added
        config.edit_environment_files(
            args.mtu, args.enable_hugepages, args.enable_numa,
            args.hugepages_size, args.hostos_cpu_count, args.ovs_dpdk,
            args.sriov, args.hw_offload, args.sriov_interfaces,
            nic_env_file, args.num_controllers, args.num_storage,
            control_flavor, ceph_storage_flavor, swift_storage_flavor,
            block_storage_flavor, args.vlan_range, args.time_zone,
            args.num_dell_computes, args.num_dell_computeshci,
            args.num_powerflex)

        # Launch the deployment
        overcloud_name_opt = ""
        if args.overcloud_name is not None:
            overcloud_name_opt = "--stack " + args.overcloud_name

        debug = ""
        if args.debug:
            debug = "--debug"

        # The order of the environment files is important as a later
        # inclusion overrides resources defined in prior inclusions.
        env_opts = ""

        # If there are edge sites we have to use network_data.yaml and
        # it must be passed as the first argument.
        if args.network_data:
            env_opts += "-n ~/pilot/templates/network_data.yaml "

        # The roles_data.yaml must be included at the beginning.
        # This is needed to enable the custom role Dell Compute.
        # It overrides the default roles_data.yaml
        env_opts += "-r ~/pilot/templates/roles_data.yaml"

        # The static-ip-environment.yaml must be included after the
        # network-environment.yaml
        if args.static_ips:
            env_opts += " -e ~/pilot/templates/static-ip-environment.yaml"

        # The static-vip-environment.yaml must be included after the
        # network-environment.yaml
        if args.static_vips:
            env_opts += " -e ~/pilot/templates/static-vip-environment.yaml"

        # The configure-barbican.yaml must be included after the
        # network-environment.yaml
        if args.barbican_enable:
            env_opts += " -e ~/pilot/templates/configure-barbican.yaml"

        # The octavia.yaml must be included after the
        # network-environment.yaml
        if args.octavia_enable:
            env_opts += " -e ~/pilot/templates/octavia.yaml"
            if args.octavia_user_certs_keys is True:
                env_opts += " -e ~/pilot/templates/cert_keys.yaml"

        if args.node_placement:
            env_opts += " -e ~/pilot/templates/node-placement.yaml"

        # The neutron-ovs.yaml must be included before dell-environment.yaml
        # to enable ovs and disable ovn in OSP16.1. In case we need to use
        # OVN in future, please delete this line
        env_opts += (" -e ~/pilot/templates/overcloud/environments/"
                     "services/neutron-ovs.yaml")

        # The neutron-ovs-dvr.yaml must be included after the
        # neutron-ovs.yaml
        if args.dvr_enable:
            env_opts += " -e ~/pilot/templates/neutron-ovs-dvr.yaml"

        # The dell-environment.yaml must be included after the
        # storage-environment.yaml and ceph-radosgw.yaml
        if args.num_powerflex > 0:
            env_opts += " -e ~/containers-prepare-parameter.yaml" \
                        " -e ~/pilot/templates/dell-environment.yaml"
        else:
            env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                        "storage-environment.yaml" \
                        " -e ~/containers-prepare-parameter.yaml" \
                        " -e ~/pilot/templates/dell-environment.yaml"

        if args.ovs_dpdk:
            if not args.enable_hugepages or not args.enable_numa:
                # Implicit string concatenation keeps the space the
                # original "+"-joined message dropped ("must beenabled").
                raise ValueError("Both hugepages and numa must be "
                                 "enabled in order to use OVS-DPDK")
            env_opts += " -e ~/pilot/templates/neutron-ovs-dpdk.yaml"

        if args.sriov:
            if not args.enable_numa:
                raise ValueError("Numa cpu pinning must be "
                                 "enabled in order to use SRIOV")
            env_opts += " -e ~/pilot/templates/neutron-sriov.yaml"

        if args.enable_dellsc:
            env_opts += " -e ~/pilot/templates/dellsc-cinder-config.yaml"

        if args.enable_unity:
            env_opts += " -e ~/pilot/templates/dellemc-unity-cinder-" \
                        "container.yaml"
            env_opts += " -e ~/pilot/templates/dellemc-unity-cinder-" \
                        "backend.yaml"

        if args.enable_unity_manila:
            env_opts += " -e ~/pilot/templates/unity-manila-container.yaml"
            env_opts += " -e ~/pilot/templates/unity-manila-config.yaml"

        if args.enable_powermax:
            if args.powermax_protocol == "iSCSI":
                env_opts += " -e ~/pilot/templates/dellemc-powermax-iscsi-" \
                            "cinder-backend.yaml"
            else:
                env_opts += " -e ~/pilot/templates/dellemc-powermax-fc-" \
                            "cinder-backend.yaml"

        if args.enable_powermax_manila:
            env_opts += " -e ~/pilot/templates/powermax-manila-config.yaml"

        if args.enable_powerstore:
            env_opts += (" -e ~/pilot/templates/"
                         "dellemc-powerstore-cinder-backend.yaml")

        if args.num_powerflex > 0:
            env_opts += (" -e ~/pilot/templates/overcloud/environments/"
                         "powerflex-ansible/powerflex-ansible.yaml")
            env_opts += (" -e ~/pilot/templates/"
                         "dellemc-powerflex-cinder-backend.yaml")
            env_opts += (" -e ~/pilot/templates/"
                         "custom-dellemc-volume-mappings.yaml")
        else:
            env_opts += \
                " -e /usr/share/openstack-tripleo-heat-templates/" \
                "environments/ceph-ansible/ceph-ansible.yaml" \
                " -e /usr/share/openstack-tripleo-heat-templates/" \
                "environments/ceph-ansible/ceph-rgw.yaml"
            if args.dashboard_enable:
                env_opts += \
                    " -e /usr/share/openstack-tripleo-heat-templates/" \
                    "environments/ceph-ansible/ceph-dashboard.yaml"
                env_opts += " -e ~/pilot/templates/ceph_dashboard_admin.yaml"

        # The network-environment.yaml must be included after other templates
        # for effective parameter overrides (External vlan default route)
        # The network-environment.yaml must be included after the
        # network-isolation.yaml
        env_opts += " -e ~/pilot/templates/overcloud/environments/" \
                    "network-isolation.yaml" \
                    " -e ~/pilot/templates/network-environment.yaml" \
                    " -e {} " \
                    "-e ~/pilot/templates/site-name.yaml".format(nic_env_file)

        cmd = "cd ;source ~/stackrc; openstack overcloud deploy" \
              " {}" \
              " --log-file ~/pilot/overcloud_deployment.log" \
              " -t {}" \
              " {}" \
              " --templates ~/pilot/templates/overcloud" \
              " {}" \
              " --libvirt-type kvm" \
              " --no-cleanup" \
              " --ntp-server {}" \
              "".format(debug,
                        args.timeout,
                        overcloud_name_opt,
                        env_opts,
                        args.ntp_server_fqdn,
                        )

        # Record the exact deploy command, one option per line, for
        # post-mortem debugging.
        with open(os.path.join(home_dir,
                               'pilot',
                               'overcloud_deploy_cmd.log'), 'w') as f:
            f.write(cmd.replace(' -', ' \\\n -'))
            f.write('\n')

        start = time.time()
        status = run_deploy_command(cmd)
        end = time.time()
        logger.info('\nExecution time: {} (hh:mm:ss)'.format(
            time.strftime('%H:%M:%S', time.gmtime(end - start))))
        logger.info('Fetching SSH keys...')

        update_ssh_config()
        if status == 0:
            horizon_url = finalize_overcloud()
            logger.info("\nDeployment Completed")
        else:
            horizon_url = None

        logger.info('Overcloud nodes:')
        identify_nodes()

        if horizon_url:
            logger.info('\nHorizon Dashboard URL: {}\n'.format(horizon_url))
    except Exception as err:
        # The original "print(sys.stderr, err)" printed the stream object
        # to stdout (botched py2->py3 conversion); write the error to
        # stderr instead. Re-raise so the traceback reaches the caller;
        # the old sys.exit(1) after "raise" was unreachable and is gone.
        print(err, file=sys.stderr)
        raise
def main():
    """Generate dhcpd.conf from the management network settings and
    install, enable, and restart the DHCP service on the SAH node.

    Builds the config by substituting SUBNET/NETMASK/BROADCAST/GATEWAY
    tokens and the management pool ranges into the dhcpd.conf template,
    copies it to the SAH over scp, touches the leases file, then enables
    and restarts dhcpd over ssh. Exits with status 1 on any failure.
    """
    sah_user = "******"
    args = parse_arguments(sah_user)

    LoggingHelper.configure_logging(args.logging_level,
                                    noisy_logger="paramiko")

    # Prompt only when the password was not supplied on the command line.
    sah_password = args.password
    if not sah_password:
        sah_password = getpass("Enter the password for the "
                               "{} user of the SAH node: ".format(sah_user))

    management_net = NetworkHelper.get_management_network()

    dhcp_conf = os.path.join(os.path.expanduser('~'), 'pilot', 'dhcpd.conf')

    LOG.info("Creating dhcp configuration file {}".format(dhcp_conf))

    dhcp_conf_template = os.path.join(os.path.expanduser('~'), 'pilot',
                                      'templates', 'dhcpd.conf')
    try:
        # "with" guarantees the template handle is closed even on error;
        # the original opened it without ever closing it.
        with open(dhcp_conf_template, 'r') as in_file:
            file_text = in_file.read()
    except IOError:
        LOG.exception("Could not open dhcp.conf template file {}".format(
            dhcp_conf_template))
        sys.exit(1)

    # Substitute the management network parameters into the template.
    token_map = {}
    token_map["SUBNET"] = str(management_net.network)
    token_map["NETMASK"] = str(management_net.netmask)
    token_map["BROADCAST"] = str(management_net.broadcast)
    token_map["GATEWAY"] = NetworkHelper.get_management_network_gateway()

    for token, value in token_map.items():
        file_text = file_text.replace(token, value)

    # Get the management network pools
    management_net_pools = NetworkHelper.get_management_network_pools()

    # Plug in the management pool ranges in place of the template's
    # placeholder "range POOL_START POOL_END;" line.
    range_lines = ""
    for pool in management_net_pools:
        range_lines += "        range {} {};\n".format(pool["start"],
                                                       pool["end"])
    file_text = re.sub("[ \t]*range[ \t]+POOL_START[ \t]+POOL_END;\n",
                       range_lines, file_text)

    try:
        with open(dhcp_conf, 'w') as out_file:
            out_file.write(file_text)
    except IOError:
        LOG.exception("Could not open {} for writing.".format(dhcp_conf))
        sys.exit(1)

    # scp dhcp.conf to the SAH
    dest_dhcp_conf = "/etc/dhcp/dhcpd.conf"
    LOG.info("Copying {} to {}@{}:{}".format(dhcp_conf, sah_user,
                                             args.sah_ip, dest_dhcp_conf))
    Scp.put_file(args.sah_ip, dhcp_conf, dest_dhcp_conf, user=sah_user,
                 password=sah_password)

    # The dhcp service will not start without an existing leases file,
    # so touch it to make sure it exists before starting the service
    dhcp_leases = "/var/lib/dhcpd/dhcpd.leases"
    LOG.info("Touching {}:{} as {}".format(args.sah_ip, dhcp_leases,
                                           sah_user))
    exit_code, _, std_err = Ssh.execute_command(args.sah_ip,
                                                "touch " + dhcp_leases,
                                                user=sah_user,
                                                password=sah_password)
    if exit_code != 0:
        LOG.error("Unable to touch {}:{}: {}".format(args.sah_ip,
                                                     dhcp_leases, std_err))
        sys.exit(1)

    # Enable and restart the dhcp server on the SAH
    LOG.info("Enabling dhcpd on {} as {}".format(args.sah_ip, sah_user))
    exit_code, _, std_err = Ssh.execute_command(args.sah_ip,
                                                "systemctl enable dhcpd",
                                                user=sah_user,
                                                password=sah_password)
    if exit_code != 0:
        LOG.error("Unable to enable dhcpd on {}: {}".format(args.sah_ip,
                                                            std_err))
        sys.exit(1)

    LOG.info("Restarting dhcpd on {} as {}".format(args.sah_ip, sah_user))
    exit_code, _, std_err = Ssh.execute_command(args.sah_ip,
                                                "systemctl restart dhcpd",
                                                user=sah_user,
                                                password=sah_password)
    if exit_code != 0:
        LOG.error("Unable to restart dhcpd on {}: {}".format(args.sah_ip,
                                                             std_err))
        sys.exit(1)
def main():
    """Import nodes into ironic and stamp Dell-specific properties on them.

    Reads the node definition file, strips downstream-only attributes,
    imports the result with "openstack overcloud node import", then sets
    model, service tag and provisioning MAC properties on each ironic
    node; when routed networks are enabled, also creates a ctlplane port
    for the provisioning MAC. Exits with status 1 on import or parse
    failure.
    """
    args = parse_arguments()

    LoggingHelper.configure_logging(args.logging_level)

    # Load the nodes into ironic
    import_json = os.path.expanduser('~/nodes.json')
    with open(args.node_definition) as node_file:
        content = json.load(node_file)
    for node in content['nodes']:
        # Iterate a snapshot of the keys: popping while iterating the
        # live keys() view raises RuntimeError in Python 3.
        for key in list(node):
            if key in DOWNSTREAM_ATTRS:
                node.pop(key)
    with open(import_json, 'w') as out:
        json.dump(content, out)

    logger.info("Importing {} into ironic".format(args.node_definition))
    cmd = ["openstack", "overcloud", "node", "import", import_json]
    exit_code, stdout, stderr = Exec.execute_command(cmd)
    if exit_code != 0:
        logger.error("Failed to import nodes into ironic: {}, {}".format(
            stdout, stderr))
        sys.exit(1)

    # Load the instack file
    try:
        json_file = os.path.expanduser(args.node_definition)
        with open(json_file, 'r') as instackenv_json:
            instackenv = json.load(instackenv_json)
    except (IOError, ValueError):
        logger.exception("Failed to load node definition file {}".format(
            args.node_definition))
        sys.exit(1)

    nodes = instackenv["nodes"]

    # The ironic client is loop-invariant; create it once up front.
    ironic_client = IronicHelper.get_ironic_client()

    # Loop thru the nodes
    for node in nodes:
        # Find the node in ironic
        ironic_node = IronicHelper.get_ironic_node(ironic_client,
                                                   node["pm_addr"])

        # Set the model and service tag on the node
        logger.info("Setting model ({}), service tag ({}), and provisioning "
                    "MAC ({}) on {}".format(
                        node["model"] if "model" in node else "None",
                        node["service_tag"],
                        node["provisioning_mac"]
                        if "provisioning_mac" in node else "None",
                        node["pm_addr"]))
        patch = [{'op': 'add',
                  'value': node["service_tag"],
                  'path': '/properties/service_tag'}]
        if "model" in node:
            patch.append({'op': 'add',
                          'value': node["model"],
                          'path': '/properties/model'})
        if "provisioning_mac" in node:
            patch.append({'op': 'add',
                          'value': node["provisioning_mac"],
                          'path': '/properties/provisioning_mac'})

            # Guarded by the "provisioning_mac" check above so a node
            # without one cannot raise KeyError here.
            if utils.Utils.is_enable_routed_networks():
                logger.info("Adding port with physical address to node: %s",
                            str(ironic_node.uuid))
                kwargs = {'address': node["provisioning_mac"],
                          'physical_network': 'ctlplane',
                          'node_uuid': ironic_node.uuid}
                ironic_client.port.create(**kwargs)

        ironic_client.node.update(ironic_node.uuid, patch)