def reboot_system(self, system_name):
    from lab.nodes.lab_server import LabServer
    from lab.logger import lab_logger

    token = self.__cobbler.login(self._user, self._password)
    handle = self.__cobbler.get_system_handle(system_name, token)
    if self._force_pxe_boot:
        self.__cobbler.modify_system(handle, 'netboot_enabled', True, token)
        self.__cobbler.modify_system(handle, 'ks_meta', 'ProvTime={0}'.format(self._prov_time), token)
    rendered = self.__cobbler.get_system_as_rendered(system_name)
    server = LabServer(name=system_name, lab=None, ip=self.ip_for_system(rendered), username='******', password=self._system_password, hostname=rendered['hostname'])
    server.set_ipmi(ip=rendered['power_address'], username=rendered['power_user'], password=rendered['power_pass'])
    lab_logger.info('server {0} is being provisioned by PXE re-booting... (might take several minutes - please wait)'.format(server))
    self.__cobbler.power_system(handle, "reboot", token)
    return server

def cleanup(self):
    from lab.logger import lab_logger

    self.delete_service_profiles()
    self.delete_mac_pools()
    for server_pool in self.cmd('scope org; sh server-pool | no-more | egrep -V "Name|----|MAC" | cut -f 5 -d " "').split():
        self.cmd('scope org; delete server-pool {0}; commit-buffer'.format(server_pool))
    for uuid_pool in self.cmd('scope org; sh uuid-suffix-pool | no-more | egrep -V "Name|----|UUID" | cut -f 5 -d " "').split():
        self.cmd('scope org; delete uuid-suffix-pool {0}; commit-buffer'.format(uuid_pool))
    for boot_policy in self.cmd('scope org; sh boot-policy | no-more | egrep -V "Name|----|UUID" | cut -f 5 -d " "').split():
        self.cmd('scope org; delete boot-policy {0}; commit-buffer'.format(boot_policy))
    for dyn_vnic_policy in self.cmd('scope org; show dynamic-vnic-conn-policy detail | no-more | egrep "Name:" | cut -f 6 -d " "').split():
        self.cmd('scope org; delete dynamic-vnic-conn-policy {0}; commit-buffer'.format(dyn_vnic_policy))
    for vlan in self.cmd('scope eth-uplink; sh vlan | no-more | eg -V "default|VLAN|Name|-----" | cut -f 5 -d " "').split():
        self.cmd('scope eth-uplink; delete vlan {0}; commit-buffer'.format(vlan))
    self.delete_static_cimc_ip('Complete')
    # run('acknowledge server {0} ; commit-buffer'.format(server_num), shell=False)
    for block in self.cmd('scope org; scope ip-pool ext-mgmt; sh block | egrep [1-9] | cut -f 5-10 -d " "').split('\n'):
        if block:
            self.cmd('scope org; scope ip-pool ext-mgmt; delete block {0}; commit-buffer'.format(block))
    for pp in self.cmd('scope system; scope vm-mgmt; scope profile-set; show port-profile detail | no-more | egrep "Name:" | cut -f 6 -d " "').split('\n'):
        if pp.strip():
            self.cmd("end; scope system; scope vm-mgmt; scope profile-set; delete port-profile {0} ; commit-buffer".format(pp.strip()))
    for fabric in 'a', 'b':
        for port_channel_id in self.cmd('scope eth-uplink; scope fabric {0}; show port-channel detail | egrep "Port Channel Id:" | cut -f 8 -d " "'.format(fabric)).split():
            if port_channel_id.strip():
                self.cmd('scope eth-uplink; scope fabric {0}; delete port-channel {1}; commit-buffer'.format(fabric, port_channel_id.strip()))
    lab_logger.info('finished')

def create_server(self, dev_num, hostname, on_nets, image_url, image_checksum):
    from lab.server import Server
    from lab.logger import lab_logger

    net_tmpl = '''
        <interface type='network'>
            <source network='{{net_name}}'/>
            <mac address='{{mac}}'/>
            <target dev='v{{net_name}}-{hostname}'/>
        </interface>
    '''.format(lab_id=self.lab_id, dev_num=dev_num, hostname=hostname)

    macs = ['ee:{lab_id:02}:00:{net_id:02}:00:{dev_num:02}'.format(lab_id=self.lab_id, net_id=net_id, dev_num=dev_num) for net_id in on_nets]
    net_names = [self.network_names[net_id] for net_id in on_nets]
    net_part = '\n\n'.join([net_tmpl.format(net_name=net_names[i], mac=macs[i]) for i in range(len(on_nets))])

    disk_part = '''
        <disk type='file' device='disk'>
            <driver name='qemu' type='qcow2'/>
            <source file='{main_disk}'/>
            <target dev='vda' bus='virtio'/>
        </disk>
        <disk type='file' device='disk'>
            <driver name='qemu' type='qcow2'/>
            <source file='{cloud_init_disk}'/>
            <target dev='hdb' bus='ide'/>
        </disk>
    '''.format(main_disk=self.create_main_disk(hostname=hostname, image_url=image_url, image_checksum=image_checksum),
               cloud_init_disk=self.create_cloud_init_disk(hostname=hostname))

    xml = self.domain_tmpl.format(hostname='{0}-{1}'.format(self.lab_id, hostname), net_part=net_part, disk_part=disk_part)
    self.save_xml(name=hostname, xml=xml)
    domain = self.connection.defineXML(xml)
    domain.create()
    ip = self.ip_for_mac_by_looking_at_libvirt_leases(net=net_names[0], mac=macs[0])
    lab_logger.info(msg='Domain {0} created'.format(hostname))
    return Server(ip=ip, username=self.username, password=Server.USE_SSH_KEY)

def log(self, message, level='info'):
    from lab.logger import lab_logger

    message = '{}: {}'.format(self, message)
    if level == 'info':
        lab_logger.info(message)
    elif level == 'warning':
        lab_logger.warning(message)
    else:
        raise RuntimeError('Specified "{}" logger level is not known'.format(level))

def _rest_api(self, commands, timeout=2):
    import requests
    import json
    from lab.logger import lab_logger

    lab_logger.info('{0} commands: {1}'.format(self, ", ".join(commands)))
    oob_ip, oob_u, oob_p = self.get_oob()
    body = [{"jsonrpc": "2.0", "method": "cli", "params": {"cmd": command, "version": 1}, "id": 1} for command in commands]
    try:
        data = json.dumps(body)
        result = requests.post('http://{0}/ins'.format(oob_ip), auth=(oob_u, oob_p), headers={'content-type': 'application/json-rpc'}, data=data, timeout=timeout)
        return result.json()
    except requests.exceptions.ConnectionError:
        self._allow_feature_nxapi()
        return self._rest_api(commands=commands, timeout=timeout)

def n9_configure_for_lab(self, topology):
    from lab.logger import lab_logger

    lab_logger.info('Configuring {0}'.format(self))
    self.n9_get_status()
    self.cmd(['conf t', 'feature lacp'])
    all_vlan_ids = self.n9_configure_vlans()
    self.n9_configure_peer_link(all_vlan_ids=all_vlan_ids)
    self.n9_configure_ports(wires=self._upstream_wires)
    self.n9_configure_ports(wires=self._downstream_wires)
    if topology == self.lab().TOPOLOGY_VXLAN:
        self.n9_configure_asr1k()

def log(self):
    from lab.logger import lab_logger

    lab_logger.info('\n\n Report on lab: ')
    for hostname in sorted(self.hostname_2_ip.keys()):
        lab_logger.info(hostname + ': ' + self.hostname_2_ip[hostname])
    lab_logger.info('\n')
    for role in sorted(self.info.keys()):
        lab_logger.info(role + ' ip: ' + ' '.join(self.get(role=role, parameter='ip')))

def report(jobs='devstacks.yaml', report_file='report.html', local_report_path='./reports/'):
    """fab jenkins_reports.report \t\t\t Gets report from jenkins in html format using yaml conf file.

    :param local_report_path:
    :param report_file:
    :param jobs:
    """
    from fabric.api import settings, cd, run, shell_env, get  # assumption: these Fabric 1.x helpers are not already imported at module level
    from fabric.contrib.files import exists
    from lab.logger import lab_logger

    lab_logger.info("Getting report with jobs yaml {}".format(jobs))
    user_name = 'localadmin'
    report_path = '/home/{user_name}/workspace/test_aggregator/openstack-sqe/tools/jenkins/job-helpers/'.format(user_name=user_name)
    with settings(user=user_name, password='******', host_string='172.29.172.165'):
        with cd(report_path):
            if exists('./{}'.format(report_file)):
                run('rm ./{}'.format(report_file))
            with shell_env(JENKINS_URL='http://172.29.173.72:8080/', REPORT_NAME='REPORT', MAKE_BUG_LIST='TRUE'):
                run('python coi_ci_reporter.py {yaml}>{report}'.format(yaml=jobs, report=report_file))
            get(remote_path=report_path + report_file, local_path=local_report_path + report_file)

def cobbler_configure_for(self, node):
    import validators
    from lab.time_func import time_as_string
    from lab.logger import lab_logger

    lab_logger.info('{}: (Re)creating cobbler profile for {}'.format(self, node))
    system_name = '{}-{}'.format(self.lab(), node.get_id())
    comment = 'This system is created by {0} for LAB {1} at {2}'.format(__file__, self.lab(), time_as_string())

    network_commands = []
    gateway = None
    for nic in node.get_nics().values():
        ip, mask = nic.get_ip_and_mask()
        ip_mask_part = '--ip-address={} --netmask={} --static 1'.format(ip, mask) if validators.ipv4(str(ip)) else ''
        mac = nic.get_mac()
        name = nic.get_name()
        if nic.is_ssh():
            gateway = nic.get_net()[0]
        if nic.is_bond():
            for name_slave, mac_port in nic.get_slave_nics().items():
                mac = mac_port['mac']
                network_commands.append('--interface={} --mac={} --interface-type=bond_slave --interface-master={}'.format(name_slave, mac, name))
            network_commands.append('--interface={} --interface-type=bond --bonding-opts="miimon=100 mode=1" {}'.format(name, ip_mask_part))
        else:
            network_commands.append('--interface={} --mac={} {}'.format(name, mac, ip_mask_part))

    systems = self.run('cobbler system list')
    if system_name in systems:
        self.run('cobbler system remove --name={}'.format(system_name))
    self.run('cobbler system add --name={} --profile=RHEL7.2-x86_64 --kickstart=/var/lib/cobbler/kickstarts/sqe --comment="{}"'.format(system_name, comment))
    self.run('cobbler system edit --name={} --hostname={} --gateway={}'.format(system_name, node.hostname(), gateway))
    for cmd in network_commands:
        self.run('cobbler system edit --name={} {}'.format(system_name, cmd))
    ipmi_ip, ipmi_username, ipmi_password = node.get_oob()
    self.run('cobbler system edit --name={} --power-type=ipmilan --power-address={} --power-user={} --power-pass={}'.format(system_name, ipmi_ip, ipmi_username, ipmi_password))
    return system_name

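# Hedged illustration of one NIC edit command the loop above would emit for a plain (non-bond) interface;
# the system name, interface, MAC and addresses below are made-up example values, not taken from the source:
#   cobbler system edit --name=lab1-control-1 --interface=eth0 --mac=00:11:22:33:44:55 --ip-address=10.0.0.11 --netmask=255.255.255.0 --static 1
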
def create_networks(self):
    from lab.logger import lab_logger

    tmpl = '''
        <network>
            <name>{name}</name>
            <bridge name='br{name}' />
            <forward mode="nat"/>
            {ip_part}
        </network>
    '''
    ip_part4 = '<ip address="10.{lab_id}.0.1" netmask="255.255.255.0"><dhcp><range start="10.{lab_id}.0.2" end="10.{lab_id}.0.254" /></dhcp></ip>'.format(lab_id=self.lab_id)
    ip_part6 = '<ip family="ipv6" address="20{lab_id:02}::1" prefix="64"></ip>'.format(lab_id=self.lab_id)

    for net_id in self.network_id:
        name = self.network_names[net_id]
        # Python 2 integer division: net ids of 60 and above get the IPv6 definition, the rest get IPv4 with DHCP
        xml = tmpl.format(name=name, ip_part=ip_part6 if net_id / 60 else ip_part4)
        self.save_xml(name='net-' + name, xml=xml)
        net = self.connection.networkDefineXML(xml)
        net.create()
        net.setAutostart(True)
        lab_logger.info('Network {0} created'.format(name))

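# Hedged example of the network XML rendered by the template above for lab_id=10 and an IPv4
# network named "net1" (both values are illustrative assumptions, not from the source):
# <network>
#     <name>net1</name>
#     <bridge name='brnet1' />
#     <forward mode="nat"/>
#     <ip address="10.10.0.1" netmask="255.255.255.0"><dhcp><range start="10.10.0.2" end="10.10.0.254" /></dhcp></ip>
# </network>
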
def read_config_from_file(config_path, directory='', is_as_string=False):
    import yaml
    import requests
    import validators
    from lab.logger import lab_logger

    actual_path = actual_path_to_config(path=config_path, directory=directory)
    lab_logger.info('Taking config from {0}'.format(actual_path))

    if validators.url(actual_path):
        resp = requests.get(actual_path)
        if resp.status_code != 200:
            raise ValueError('File is not available at this URL: {0}'.format(actual_path))
        body_or_yaml = yaml.load(resp.text)
    else:
        with open(actual_path) as f:
            body_or_yaml = f.read() if is_as_string else yaml.load(f)
    if not body_or_yaml:
        raise ValueError('{0} is empty!'.format(actual_path))
    return body_or_yaml

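# Hedged usage sketch mirroring calls seen elsewhere in this section; the 'g10.yaml' name is an illustrative assumption:
# lab_cfg = read_config_from_file(config_path='g10.yaml', directory='configs')  # parsed YAML returned as dict/list
# template = read_config_from_file(config_path='./configs/osp7/osp7-install.yaml', is_as_string=True)  # raw file body as str
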
def run(self):
    import time
    from lab.logger import lab_logger

    results = {}
    self.status()
    for provider in self.providers:
        start_time = time.time()
        lab_logger.info('Running {}'.format(provider))
        self.servers.extend(provider.wait_for_servers())
        results[str(provider)] = 'spent_time={0}'.format(time.time() - start_time)
    for deployer in self.deployers:
        lab_logger.info('Running {}'.format(deployer))
        start_time = time.time()
        self.clouds.append(deployer.wait_for_cloud(self.servers))
        results[str(deployer)] = 'spent_time={0}'.format(time.time() - start_time)
    for runner in self.runners:
        lab_logger.info('Running {}'.format(runner))
        start_time = time.time()
        runner.execute(self.clouds, self.servers)
        results[str(runner)] = 'spent_time={0}'.format(time.time() - start_time)

def create_config_file_for_osp7_install(self, topology=TOPOLOGY_VLAN):
    import os
    from lab.logger import lab_logger
    from lab.with_config import read_config_from_file
    from lab.cimc import CimcServer
    from lab.fi import FI
    from lab.n9k import Nexus

    lab_logger.info('Creating config for osp7_bootstrap')
    osp7_install_template = read_config_from_file(config_path='./configs/osp7/osp7-install.yaml', is_as_string=True)

    # Calculate IPs for user net, VIPs and director IP
    ssh_net = filter(lambda net: net.is_ssh(), self._nets.values())[0]
    overcloud_network_cidr, overcloud_external_gateway, overcloud_external_ip_start, overcloud_external_ip_end = ssh_net.cidr, ssh_net[1], ssh_net[4+1], ssh_net[-3]

    eth0_mac_versus_service_profile = {}
    overcloud_section = []
    for server in self.get_controllers() + self.get_computes():
        service_profile_name = '""' if isinstance(server, CimcServer) else server.get_ucsm_info()[1]
        try:
            eth0_nic = server.get_nic(nic='eth0')[0]
        except IndexError:
            raise ValueError('{0} has no eth0'.format(server.name()))
        eth0_mac = eth0_nic.get_mac()
        eth0_mac_versus_service_profile[eth0_mac] = service_profile_name
        try:
            pxe_int_nic = server.get_nic(nic='pxe-int')[0]
        except IndexError:
            raise ValueError('{0} has no pxe-int'.format(server.name()))
        pxe_mac = pxe_int_nic.get_mac()
        ipmi_ip, ipmi_username, ipmi_password = server.get_ipmi()
        role = server.name().split('-')[0]
        descriptor = {'"arch"': '"x86_64"', '"cpu"': '"2"', '"memory"': '"8256"', '"disk"': '"1112"',
                      '"name"': '"{0}"'.format(server.name()),
                      '"capabilities"': '"profile:{0},boot_option:local"'.format(role),
                      '"mac"': '["{0}"]'.format(pxe_mac),
                      '"pm_type"': '"pxe_ipmitool"',
                      '"pm_addr"': '"{0}"'.format(ipmi_ip),
                      '"pm_user"': '"{0}"'.format(ipmi_username),
                      '"pm_password"': '"{0}"'.format(ipmi_password)}
        overcloud_section.append(',\n\t '.join(['{0}:{1}'.format(x, y) for x, y in sorted(descriptor.iteritems())]))

    network_ucsm_host_list = ','.join(['{0}:{1}'.format(name, mac) for name, mac in eth0_mac_versus_service_profile.iteritems()])
    overcloud_nodes = '{{"nodes":[\n\t{{\n\t {0}\n\t}}\n ]\n }}'.format('\n\t},\n\t{\n\t '.join(overcloud_section))

    nexus_section = []
    switch_tempest_section = []
    for n9 in self.get_nodes_by_class(Nexus):
        common_pcs_part = ': {"ports": "port-channel:' + str(n9.get_peer_link_id())  # all pcs n9k-n9k and n9k-fi
        fi_pc_part = ',port-channel:' + ',port-channel:'.join(n9.get_pcs_to_fi())
        mac_port_lines = []
        for server in self.get_controllers() + self.get_computes():
            mac = server.get_nic('pxe-int')[0].get_mac()
            if isinstance(server, CimcServer):
                individual_ports_part = ','.join([x.get_peer_node(server) for x in server.get_all_wires() if x.get_peer_node(server) == n9])  # add if wired to this n9k only
                if individual_ports_part:
                    individual_ports_part = ',' + individual_ports_part
            else:
                individual_ports_part = fi_pc_part
            mac_port_lines.append('"' + mac + '"' + common_pcs_part + individual_ports_part + '" }')
        nexus_servers_section = ',\n\t\t\t\t\t\t'.join(mac_port_lines)

        ssh_ip, ssh_username, ssh_password, hostname = n9.get_ssh()
        switch_tempest_section.append({'hostname': hostname, 'username': ssh_username, 'password': ssh_password, 'sw': str(ssh_ip)})
        n9k_description = ['"' + hostname + '": {',
                           '"ip_address": "' + str(ssh_ip) + '",',
                           '"username": "******",',
                           '"password": "******",',
                           '"nve_src_intf": 2,',
                           '"ssh_port": 22,',
                           '"physnet": "datacentre",',
                           '"servers": {' + nexus_servers_section + '}}',
                           ]
        nexus_section.append('\n\t\t\t'.join(n9k_description))
    network_nexus_config = '{\n\t\t' + ',\n\t\t'.join(nexus_section) + '}'

    n_controls, n_computes, n_ceph = self.count_role(role_name='control'), self.count_role(role_name='compute'), self.count_role(role_name='ceph')
    director_node_ssh_ip, _, _, director_hostname = self.get_director().get_ssh()

    pxe_int_vlans = self._cfg['nets']['pxe-int']['vlan']
    eth1_vlans = self._cfg['nets']['eth1']['vlan']
    ext_vlan, test_vlan, stor_vlan, stor_mgmt_vlan, tenant_vlan, fip_vlan = eth1_vlans[1], pxe_int_vlans[1], pxe_int_vlans[2], pxe_int_vlans[3], pxe_int_vlans[4], eth1_vlans[0]

    ucsm_vip = self.get_nodes_by_class(FI)[0].get_vip()

    cfg = osp7_install_template.format(director_node_hostname=director_hostname,
                                       director_node_ssh_ip=director_node_ssh_ip,
                                       ext_vlan=ext_vlan, test_vlan=test_vlan, stor_vlan=stor_vlan, stor_mgmt_vlan=stor_mgmt_vlan, tenant_vlan=tenant_vlan, fip_vlan=fip_vlan,
                                       vlan_range=self.vlan_range(),
                                       overcloud_network_cidr=overcloud_network_cidr,
                                       overcloud_external_ip_start=overcloud_external_ip_start,
                                       overcloud_external_gateway=overcloud_external_gateway,
                                       overcloud_external_ip_end=overcloud_external_ip_end,
                                       overcloud_nodes=overcloud_nodes,
                                       overcloud_control_scale=n_controls,
                                       overcloud_ceph_storage_scale=n_ceph,
                                       overcloud_compute_scale=n_computes,
                                       network_ucsm_ip=ucsm_vip,
                                       network_ucsm_username=self._neutron_username,
                                       network_ucsm_password=self._neutron_password,
                                       network_ucsm_host_list=network_ucsm_host_list,
                                       undercloud_lab_pxe_interface='pxe-ext',
                                       undercloud_local_interface='pxe-int',
                                       undercloud_fake_gateway_interface='eth1',
                                       provisioning_nic='nic4',
                                       tenant_nic='nic1',
                                       external_nic='nic2',
                                       cobbler_system='G{0}-DIRECTOR'.format(self._id),
                                       network_nexus_config=network_nexus_config,
                                       switch_tempest_section=switch_tempest_section,
                                       do_sriov=self._is_sriov)

    if topology == self.TOPOLOGY_VXLAN:
        pass

    folder = 'artifacts'
    file_path = os.path.join(folder, 'g{0}-osp7-install-config.conf'.format(self._id))
    if not os.path.exists(folder):
        os.makedirs(folder)
    with open(file_path, 'w') as f:
        f.write(cfg)

    lab_logger.info('finished. Execute osp7_bootstrap --config {0}'.format(file_path))

def cmd(config_path):
    """fab cmd:g10\t\t\t\tRun single command on lab device.

    :param config_path: path to valid hardware lab configuration, usually one of yaml in $REPO/configs
    """
    from fabric.operations import prompt
    from six import print_
    from lab.laboratory import Laboratory
    from lab.deployers.deployer_existing import DeployerExisting
    from lab.logger import lab_logger

    l = Laboratory(config_path=config_path)
    nodes = sorted(map(lambda node: node.get_id(), l.get_nodes_by_class()))
    while True:
        device_name = prompt(text='{lab} has: "cloud" and:\n {nodes}\n(use "quit" to quit)\n node? '.format(lab=l, nodes=nodes))
        if device_name == 'cloud':
            d = DeployerExisting({'cloud': config_path, 'hardware-lab-config': config_path})
            device = d.wait_for_cloud([])
        elif device_name in ['quit', 'q', 'exit']:
            return
        elif device_name not in nodes:
            print_(device_name, 'is not available')
            continue
        else:
            device = l.get_node_by_id(device_name)
        method_names = [x for x in dir(device) if not x.startswith('_')]
        print_(device, ' has: \n', '\n'.join(method_names), '\n(use "node" to get back to node selection)')
        while True:
            input_method_name = prompt(text='\n\n>>{0}<< operation?: '.format(device))
            if input_method_name in ['quit', 'q', 'exit']:
                return
            elif input_method_name == 'node':
                break
            elif input_method_name in ['r', 'rpt']:
                print_(device, ' has: \n', '\n'.join(method_names), '\n(use "node" to get back to node selection)')
                continue
            else:
                methods_in_filter = filter(lambda mth: input_method_name in mth, method_names)
                if len(methods_in_filter) == 0:
                    lab_logger.info('{} is not available'.format(input_method_name))
                    continue
                elif len(methods_in_filter) == 1:
                    input_method_name = methods_in_filter[0]
                elif len(methods_in_filter) > 1:
                    lab_logger.info('input "{}" matches:\n{}'.format(input_method_name, '\n'.join(methods_in_filter)))
                    continue
            method_to_execute = getattr(device, input_method_name)
            parameters = method_to_execute.func_code.co_varnames[1:method_to_execute.func_code.co_argcount]
            arguments = []
            for parameter in parameters:
                argument = prompt(text='{p}=? '.format(p=parameter))
                if argument.startswith('['):
                    argument = argument.strip('[]').split(',')
                elif argument in ['True', 'true', 'yes']:
                    argument = True
                elif argument in ['False', 'false', 'no']:
                    argument = False
                arguments.append(argument)
            # noinspection PyBroadException
            try:
                results = method_to_execute(*arguments)
                lab_logger.info('\n>>{}<< RESULTS:\n\n{}\n'.format(device, results))
            except Exception as ex:
                lab_logger.exception('\n Exception: {0}'.format(ex))

def decorated_func(*args, **kwargs):
    start_time = time.time()
    result = function(*args, **kwargs)
    lab_logger.info('TIMED: Function [{0}] finished in {1} sec'.format(function.__name__, int(time.time() - start_time)))
    return result

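# Hedged sketch of the enclosing timing decorator that presumably produces decorated_func above;
# the decorator name `time_it`, the use of functools.wraps and the module-level imports are assumptions,
# not confirmed by the source.
import time
from functools import wraps

from lab.logger import lab_logger


def time_it(function):
    @wraps(function)  # preserve the wrapped function's __name__ shown in the TIMED log line
    def decorated_func(*args, **kwargs):
        start_time = time.time()
        result = function(*args, **kwargs)
        lab_logger.info('TIMED: Function [{0}] finished in {1} sec'.format(function.__name__, int(time.time() - start_time)))
        return result
    return decorated_func


# Usage example (the decorated function name is hypothetical):
# @time_it
# def deploy_lab():
#     ...
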
def create_config_file_for_osp7_install(self, topology=TOPOLOGY_VLAN):
    import os
    from lab.logger import lab_logger
    from lab.with_config import read_config_from_file
    from lab.nodes.cimc_server import CimcServer
    from lab.nodes.fi import FI
    from lab.nodes.n9 import N9

    lab_logger.info('Creating config for osp7_bootstrap')
    osp7_install_template = read_config_from_file(config_path='./configs/osp7/osp7-install.yaml', is_as_string=True)

    # Calculate IPs for user net, VIPs and director IP
    ssh_net = filter(lambda net: net.is_ssh(), self._nets.values())[0]
    overcloud_network_cidr, overcloud_external_gateway, overcloud_external_ip_start, overcloud_external_ip_end = ssh_net.cidr, ssh_net[1], ssh_net[4 + 1], ssh_net[-3]

    eth0_mac_versus_service_profile = {}
    overcloud_section = []
    for server in self.get_controllers() + self.get_computes():
        service_profile_name = '""' if isinstance(server, CimcServer) else server.get_ucsm_info()[1]
        try:
            eth0_nic = server.get_nic(nic='eth0')[0]
        except IndexError:
            raise ValueError('{0} has no eth0'.format(server.name()))
        eth0_mac = eth0_nic.get_mac()
        eth0_mac_versus_service_profile[eth0_mac] = service_profile_name
        try:
            pxe_int_nic = server.get_nic(nic='pxe-int')[0]
        except IndexError:
            raise ValueError('{0} has no pxe-int'.format(server.name()))
        pxe_mac = pxe_int_nic.get_mac()
        ipmi_ip, ipmi_username, ipmi_password = server.get_ipmi()
        role = server.name().split('-')[0]
        descriptor = {'"arch"': '"x86_64"', '"cpu"': '"2"', '"memory"': '"8256"', '"disk"': '"1112"',
                      '"name"': '"{0}"'.format(server.name()),
                      '"capabilities"': '"profile:{0},boot_option:local"'.format(role),
                      '"mac"': '["{0}"]'.format(pxe_mac),
                      '"pm_type"': '"pxe_ipmitool"',
                      '"pm_addr"': '"{0}"'.format(ipmi_ip),
                      '"pm_user"': '"{0}"'.format(ipmi_username),
                      '"pm_password"': '"{0}"'.format(ipmi_password)}
        overcloud_section.append(',\n\t '.join(['{0}:{1}'.format(x, y) for x, y in sorted(descriptor.items())]))

    network_ucsm_host_list = ','.join(['{0}:{1}'.format(name, mac) for name, mac in eth0_mac_versus_service_profile.items()])
    overcloud_nodes = '{{"nodes":[\n\t{{\n\t {0}\n\t}}\n ]\n }}'.format('\n\t},\n\t{\n\t '.join(overcloud_section))

    nexus_section = []
    switch_tempest_section = []
    for n9 in self.get_nodes_by_class(N9):
        common_pcs_part = ': {"ports": "port-channel:' + str(n9.get_peer_link_id())  # all pcs n9k-n9k and n9k-fi
        fi_pc_part = ',port-channel:' + ',port-channel:'.join(n9.get_pcs_to_fi())
        mac_port_lines = []
        for server in self.get_controllers() + self.get_computes():
            mac = server.get_nic('pxe-int')[0].get_mac()
            if isinstance(server, CimcServer):
                individual_ports_part = ','.join([x.get_peer_node(server) for x in server.get_all_wires() if x.get_peer_node(server) == n9])  # add if wired to this n9k only
                if individual_ports_part:
                    individual_ports_part = ',' + individual_ports_part
            else:
                individual_ports_part = fi_pc_part
            mac_port_lines.append('"' + mac + '"' + common_pcs_part + individual_ports_part + '" }')
        nexus_servers_section = ',\n\t\t\t\t\t\t'.join(mac_port_lines)

        ssh_ip, ssh_username, ssh_password, hostname = n9.get_ssh()
        switch_tempest_section.append({'hostname': hostname, 'username': ssh_username, 'password': ssh_password, 'sw': str(ssh_ip)})
        n9k_description = ['"' + hostname + '": {',
                           '"ip_address": "' + str(ssh_ip) + '",',
                           '"username": "******",',
                           '"password": "******",',
                           '"nve_src_intf": 2,',
                           '"ssh_port": 22,',
                           '"physnet": "datacentre",',
                           '"servers": {' + nexus_servers_section + '}}',
                           ]
        nexus_section.append('\n\t\t\t'.join(n9k_description))
    network_nexus_config = '{\n\t\t' + ',\n\t\t'.join(nexus_section) + '}'

    n_controls, n_computes, n_ceph = self.count_role(role_name='control'), self.count_role(role_name='compute'), self.count_role(role_name='ceph')
    director_node_ssh_ip, _, _, director_hostname = self.get_director().get_ssh()

    pxe_int_vlans = self._cfg['nets']['pxe-int']['vlan']
    eth1_vlans = self._cfg['nets']['eth1']['vlan']
    ext_vlan, test_vlan, stor_vlan, stor_mgmt_vlan, tenant_vlan, fip_vlan = eth1_vlans[1], pxe_int_vlans[1], pxe_int_vlans[2], pxe_int_vlans[3], pxe_int_vlans[4], eth1_vlans[0]

    ucsm_vip = self.get_nodes_by_class(FI)[0].get_ucsm_vip()

    cfg = osp7_install_template.format(director_node_hostname=director_hostname,
                                       director_node_ssh_ip=director_node_ssh_ip,
                                       ext_vlan=ext_vlan, test_vlan=test_vlan, stor_vlan=stor_vlan, stor_mgmt_vlan=stor_mgmt_vlan, tenant_vlan=tenant_vlan, fip_vlan=fip_vlan,
                                       vlan_range=self.vlan_range(),
                                       overcloud_network_cidr=overcloud_network_cidr,
                                       overcloud_external_ip_start=overcloud_external_ip_start,
                                       overcloud_external_gateway=overcloud_external_gateway,
                                       overcloud_external_ip_end=overcloud_external_ip_end,
                                       overcloud_nodes=overcloud_nodes,
                                       overcloud_control_scale=n_controls,
                                       overcloud_ceph_storage_scale=n_ceph,
                                       overcloud_compute_scale=n_computes,
                                       network_ucsm_ip=ucsm_vip,
                                       network_ucsm_username=self._neutron_username,
                                       network_ucsm_password=self._neutron_password,
                                       network_ucsm_host_list=network_ucsm_host_list,
                                       undercloud_lab_pxe_interface='pxe-ext',
                                       undercloud_local_interface='pxe-int',
                                       undercloud_fake_gateway_interface='eth1',
                                       provisioning_nic='nic4',
                                       tenant_nic='nic1',
                                       external_nic='nic2',
                                       cobbler_system='G{0}-DIRECTOR'.format(self._id),
                                       network_nexus_config=network_nexus_config,
                                       switch_tempest_section=switch_tempest_section,
                                       do_sriov=self._is_sriov)

    if topology == self.TOPOLOGY_VXLAN:
        pass

    folder = 'artifacts'
    file_path = os.path.join(folder, 'g{0}-osp7-install-config.conf'.format(self._id))
    if not os.path.exists(folder):
        os.makedirs(folder)
    with open(file_path, 'w') as f:
        f.write(cfg)

    lab_logger.info('finished. Execute osp7_bootstrap --config {0}'.format(file_path))

def logger(self, message):
    from lab.logger import lab_logger

    lab_logger.info('{0}: CIMC {1}'.format(self, message))

def configure_for_osp7(self):
    import time
    from lab.logger import lab_logger

    lab_logger.info('Configuring {}'.format(self))

    server_pool_name = 'QA-SERVERS'
    uuid_pool_name = 'QA'
    dynamic_vnic_policy_name = 'dvnic-4'

    self.cleanup()

    n_servers = len(self.lab().get_nodes_by_class(FiServer))  # how many servers UCSM currently sees

    neutron_username, neutron_password = self.lab().get_neutron_creds()
    self.create_user(username=neutron_username, password=self._password)  # special user to be used by neutron services
    self.create_uplink(wires=self._upstream_wires)
    self.create_uuid_pool(pool_name=uuid_pool_name, n_uuids=n_servers)
    self.create_boot_policies(vnics=self._lab.ucsm_nets_with_pxe())
    self.create_dynamic_vnic_connection_policy(policy_name=dynamic_vnic_policy_name)
    self.create_vlans(vlans=self.lab().get_all_vlans())
    self.create_server_pool(name=server_pool_name)

    # MAC pools
    # for if_name, mac_value, _ in [config['pxe-int-net'], config['user-net'], config['eth0-net'], config['eth1-net']]:
    #     mac_range = '{0}:01 {0}:{1}'.format(mac_value, n_servers)
    #     run('scope org; create mac-pool {0}; set assignment-order sequential; create block {1}; commit-buffer'.format(if_name, mac_range), shell=False)

    # IPMI ip pool
    # ipmi_pool = '{first} {last} {gw} {mask}'.format(first=str(ipmi_net[config['mgmt-net']['start']]),
    #                                                 last=str(ipmi_net[config['mgmt-net']['end']]), gw=str(ipmi_net[1]), mask=str(ipmi_net.netmask))
    # run('scope org; scope ip-pool ext-mgmt; set assignment-order sequential; create block {0}; commit-buffer'.format(ipmi_pool), shell=False)

    for wire in self._downstream_wires:
        server = wire.get_peer_node(self)
        server_id, service_profile_name = server.get_ucsm_info()
        is_sriov = self.lab().is_sriov()

        ipmi_ip, _, _ = server.get_ipmi()
        ipmi_net = self.lab().get_ipmi_net()
        ipmi_gw, ipmi_netmask = str(ipmi_net[1]), ipmi_net.netmask
        self.create_ipmi_static(server_id=server_id, ip=ipmi_ip, gw=ipmi_gw, netmask=ipmi_netmask)
        self.add_server_to_pool(server_id, server_pool_name)
        self.create_service_profile(service_profile_name, is_sriov)

        for order, vnic in enumerate(server.get_nics(), start=1):
            vlans = self.lab().get_net_vlans(vnic.get_name())
            self.create_vnic_with_vlans(profile=service_profile_name, vnic=vnic.get_name(), mac=vnic.get_mac(), order=order, vlans=vlans)
            if is_sriov and 'compute' in service_profile_name and vnic.get_name() in ['eth1']:
                self.set_dynamic_vnic_connection_policy(profile=service_profile_name, vnic=vnic, policy_name=dynamic_vnic_policy_name)

        self.set_boot_policy_to_service_profile(profile=service_profile_name, policy_name='pxe-ext' if 'director' in server.name() else 'pxe-int')
        self.associate_server_with_profile(profile=service_profile_name, server_id=server_id)  # main step - association - here server will be rebooted

    count_attempts = 0
    while count_attempts < 100:
        lines = self.list_service_profiles(flt='Associated')
        if len(lines) == n_servers:
            lab_logger.info('finished {0}'.format(self))
            return
        time.sleep(10)
        count_attempts += 1
    raise RuntimeError('failed to associate all service profiles')