def reattach(params):
    """Reattach a node device to the host driver.

    Looks up the PCI node device identified by params['pciaddress'],
    calls reAttach() on it, then verifies the device is no longer bound
    to the hypervisor passthrough driver (pci-stub for kvm, pciback for
    xen).

    :param params: dict with keys 'logger' and 'pciaddress'
    :returns: 0 on success, 1 on failure
    """
    global logger
    logger = params['logger']
    pciaddress = params['pciaddress']

    original_driver = check_node_reattach(pciaddress)
    logger.info("original device driver: %s" % original_driver)

    kernel_version = utils.get_host_kernel_version()
    hypervisor = utils.get_hypervisor()

    # The passthrough stub driver name depends on the hypervisor.
    pciback = ''
    if hypervisor == 'kvm':
        pciback = 'pci-stub'
    if hypervisor == 'xen':
        pciback = 'pciback'

    if 'el5' in kernel_version:
        # RHEL5 node-device names are built from vendor:product IDs
        # reported by lspci.
        vendor_product_get = "lspci -n |grep %s|awk '{print $3}'" % pciaddress
        logger.debug("the vendor:product is %s" % vendor_product_get)
        (status, retval) = commands.getstatusoutput(vendor_product_get)
        if status != 0:
            logger.error("failed to get vendor product ID")
            return 1
        vendor_ID = retval.split(":")[0]
        product_ID = retval.split(":")[1]
        device_name = "pci_%s_%s" % (vendor_ID, product_ID)
    else:
        # Newer kernels: name is built from the PCI bus/slot/function.
        (bus, slot_func) = pciaddress.split(":")
        (slot, func) = slot_func.split(".")
        device_name = "pci_0000_%s_%s_%s" % (bus, slot, func)

    logger.debug("the name of the pci device is: %s" % device_name)

    conn = sharedmod.libvirtobj['conn']
    try:
        nodeobj = conn.nodeDeviceLookupByName(device_name)
        nodeobj.reAttach()
        logger.info("reattach the node device")
        current_driver = check_node_reattach(pciaddress)
        logger.info("current device driver: %s" % current_driver)
        # Success means: device was on the stub driver and moved off it.
        if original_driver == pciback and current_driver != pciback:
            logger.info("the node %s device reattach is successful"
                        % device_name)
        else:
            logger.info("the node %s device reattach is failed" % device_name)
            return 1
    except libvirtError as e:
        # Fix: e.message is Python-2-only (gone in py3); use the
        # libvirt accessor instead.
        logger.error("API error message: %s, error code is %s"
                     % (e.get_error_message(), e.get_error_code()))
        return 1

    return 0
def detach(params):
    """Detach a node device from the host and bind it to the
    hypervisor passthrough driver.

    Looks up the PCI node device identified by params['pciaddress'],
    calls dettach() on it, then verifies the device is now bound to
    the passthrough driver (pci-stub for kvm, pciback for xen).

    :param params: dict with keys 'logger' and 'pciaddress'
    :returns: 0 on success, 1 on failure
    """
    global logger
    logger = params['logger']
    pciaddress = params['pciaddress']

    original_driver = check_node_detach(pciaddress)
    logger.info("original device driver: %s" % original_driver)

    kernel_version = utils.get_host_kernel_version()
    hypervisor = utils.get_hypervisor()

    # The passthrough stub driver name depends on the hypervisor.
    pciback = ''
    if hypervisor == 'kvm':
        pciback = 'pci-stub'
    if hypervisor == 'xen':
        pciback = 'pciback'

    if 'el5' in kernel_version:
        # RHEL5 node-device names are built from vendor:product IDs
        # reported by lspci.
        vendor_product_get = "lspci -n |grep %s|awk '{print $3}'" % pciaddress
        logger.debug("the vendor:product is %s" % vendor_product_get)
        (status, retval) = commands.getstatusoutput(vendor_product_get)
        if status != 0:
            logger.error("failed to get vendor product ID")
            return 1
        vendor_ID = retval.split(":")[0]
        product_ID = retval.split(":")[1]
        device_name = "pci_%s_%s" % (vendor_ID, product_ID)
    else:
        # Newer kernels: name is built from the PCI bus/slot/function.
        (bus, slot_func) = pciaddress.split(":")
        (slot, func) = slot_func.split(".")
        device_name = "pci_0000_%s_%s_%s" % (bus, slot, func)

    logger.debug("the name of the pci device is: %s" % device_name)

    conn = sharedmod.libvirtobj['conn']
    try:
        nodeobj = conn.nodeDeviceLookupByName(device_name)
        logger.info("detach the node device")
        # dettach() (sic) is the actual method name in the libvirt
        # Python binding.
        nodeobj.dettach()
        current_driver = check_node_detach(pciaddress)
        logger.info("current device driver: %s" % current_driver)
        # Success means: the driver changed and is now the stub driver.
        if current_driver != original_driver and current_driver == pciback:
            logger.info("the node %s device detach is successful"
                        % device_name)
        else:
            logger.info("the node %s device detach is failed" % device_name)
            return 1
    except libvirtError as e:
        # Fix: e.message is Python-2-only (gone in py3); use the
        # libvirt accessor instead.
        logger.error("API error message: %s, error code is %s"
                     % (e.get_error_message(), e.get_error_code()))
        return 1

    return 0
def multiple_thread_block_on_domain_create(params):
    """Create several guests concurrently.

    Spawns one installer thread per guest in the configured number
    range, waits for them all, and relies on each thread to check the
    return status of its create API call.
    """
    logger = params['logger']
    guestos = params.get('guestos')
    arch = params.get('guestarch')
    num = params.get('guestnum')
    xmlstr = params['xml']

    logger.info("the os of guest is %s" % guestos)
    logger.info("the arch of guest is %s" % arch)
    logger.info("the number of guest we are going to install is %s" % num)

    hypervisor = utils.get_hypervisor()
    uri = params['uri']

    auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_PASSPHRASE],
            request_credentials, None]
    conn = libvirt.openAuth(uri, auth, 0)

    logger.info("the type of hypervisor is %s" % hypervisor)
    logger.debug("the uri to connect is %s" % uri)

    # Resolve the install tree and kickstart URL for this os/arch pair.
    envfile = os.path.join(HOME_PATH, 'global.cfg')
    envparser = env_parser.Envparser(envfile)
    ostree = envparser.get_value("guest", guestos + "_" + arch)
    ks = envparser.get_value("guest", guestos + "_" + arch + "_http_ks")

    # download vmlinuz and initrd.img
    urllib.urlretrieve(os.path.join(ostree, 'isolinux/vmlinuz'),
                       '/var/lib/libvirt/boot/vmlinuz')
    urllib.urlretrieve(os.path.join(ostree, 'isolinux/initrd.img'),
                       '/var/lib/libvirt/boot/initrd.img')

    # num is "a-b": install guests named guest<a> .. guest<b-1>.
    bounds = num.split('-')
    workers = [
        guest_install("guest" + str(idx), guestos, arch, ks, conn,
                      xmlstr, logger)
        for idx in range(int(bounds[0]), int(bounds[1]))
    ]

    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    conn.close()
    return 0
def sched_params(params):
    """Set scheduler parameters and verify the result.

    For xen, iterates over every (weight, cap) pair from the 'weight'
    and 'cap' config values; for kvm, sets 'cpu_shares' from the
    'cpushares' value. Each setting is read back and verified via
    check_sched_params().

    :param params: dict with keys 'logger', 'guestname' and, depending
                   on the hypervisor, 'weight'/'cap' or 'cpushares'
    :returns: 0 on success, 1 on failure
    """
    hypervisor = utils.get_hypervisor()
    logger = params['logger']
    guestname = params['guestname']

    conn = sharedmod.libvirtobj['conn']
    domobj = conn.lookupByName(guestname)

    if check_guest_status(domobj):
        # Renamed from 'sched_params' so the local no longer shadows
        # this function.
        cur_params = domobj.schedulerParameters()
        logger.info("original scheduler parameters: %s\n" % cur_params)

        if 'xen' in hypervisor:
            str_weight = params['weight']
            str_cap = params['cap']
            # NOTE(review): eval() on config-supplied strings is unsafe
            # for untrusted input; acceptable only because the test
            # config file is trusted.
            for wgt in eval(str_weight):
                for cap in eval(str_cap):
                    dicts = {'weight': wgt, 'cap': cap}
                    logger.info("setting scheduler parameters: %s" % dicts)
                    domobj.setSchedulerParameters(dicts)
                    cur_params = domobj.schedulerParameters()
                    logger.info(
                        "current scheduler parameters: %s\n" % cur_params)
                    retval = check_sched_params(hypervisor, dicts,
                                                guestname, domobj)
                    if retval != 0:
                        return 1
        elif 'kvm' in hypervisor:
            cpu_shares = int(params['cpushares'])
            dicts = {'cpu_shares': cpu_shares}
            logger.info("setting scheduler parameters: %s" % dicts)
            domobj.setSchedulerParameters(dicts)
            cur_params = domobj.schedulerParameters()
            logger.info("current scheduler parameters: %s\n" % cur_params)
            retval = check_sched_params(hypervisor, dicts,
                                        guestname, domobj)
            if retval != 0:
                return 1
        else:
            logger.error("unsupported hypervisor type: %s" % hypervisor)
            return 1

    return 0
def reboot(params):
    """Reboot virtual machine

    Return 0 on SUCCESS or 1 on FAILURE
    """
    # Initiate and check parameters
    global logger
    logger = params['logger']
    params.pop('logger')
    domain_name = params['guestname']

    # Connect to local hypervisor connection URI
    hypervisor = utils.get_hypervisor()
    if hypervisor == "kvm":
        logger.info("kvm hypervisor doesn't support the funtion now")
        return 0

    conn = sharedmod.libvirtobj['conn']
    domobj = conn.lookupByName(domain_name)

    # Get domain ip
    logger.info("get the mac address of vm %s" % domain_name)
    mac = utils.get_dom_mac_addr(domain_name)
    logger.info("the mac address of vm %s is %s" % (domain_name, mac))
    logger.info("get ip by mac address")
    ip = utils.mac_to_ip(mac, 180)
    logger.info("the ip address of vm %s is %s" % (domain_name, ip))

    timeout = 600
    logger.info('reboot vm %s now' % domain_name)

    # Reboot domain
    try:
        # Fix: the original did `domobj = reboot(0)`, recursively calling
        # this test function; the intent is to reboot the domain object.
        domobj.reboot(0)
    except libvirtError as e:
        # Fix: py2-only `except libvirtError, e` syntax and py2-only
        # e.message attribute replaced with the portable forms.
        logger.error("API error message: %s, error code is %s"
                     % (e.get_error_message(), e.get_error_code()))
        logger.error("fail to reboot domain")
        return 1

    # Fix: the function previously fell off the end returning None;
    # the documented contract is 0 on success.
    return 0
def install_linux_check(params):
    """Health-check a guest after installation.

    Verifies the guest in sequence: ping from host, file create/read
    inside the guest, vcpu count, memory size, wget, and (for kvm/xenfv)
    nic/blk driver types. return value: 0 - ok; 1 - bad
    """
    global logger
    logger = params['logger']
    params.pop('logger')
    guestname = params.get('guestname')
    virt_type = params.get('virt_type')
    logger.info("the name of guest is %s" % guestname)

    # Connect to local hypervisor connection URI
    hypervisor = utils.get_hypervisor()
    logger.info("the type of hypervisor is %s" % hypervisor)

    conn = sharedmod.libvirtobj['conn']
    domobj = conn.lookupByName(guestname)
    state = domobj.info()[0]

    if(state == libvirt.VIR_DOMAIN_SHUTOFF):
        logger.info("guest is shutoff, if u want to run this case, \
guest must be started")
        return 1

    logger.info("get the mac address of vm %s" % guestname)
    mac = utils.get_dom_mac_addr(guestname)
    logger.info("the mac address of vm %s is %s" % (guestname, mac))

    # Poll up to 300s for the guest to obtain an IP address.
    timeout = 300
    while timeout:
        ipaddr = utils.mac_to_ip(mac, 180)
        if not ipaddr:
            logger.info(str(timeout) + "s left")
            time.sleep(10)
            timeout -= 10
        else:
            logger.info("the ip address of vm %s is %s" % (guestname, ipaddr))
            break
    if timeout == 0:
        logger.info("vm %s fail to get ip address" % guestname)
        return 1

    # Give the freshly installed guest time to settle before probing it.
    time.sleep(120)
    logger.info("Now checking guest health after installation")

    domain_name = guestname
    blk_type = params['hddriver']
    nic_type = params['nicdriver']
    Test_Result = 0

    # Ping guest from host
    logger.info("check point1: ping guest from host")
    if utils.do_ping(ipaddr, 20) == 1:
        logger.info("ping current guest successfull")
    else:
        logger.error("Error: can't ping current guest")
        Test_Result = 1
        return Test_Result

    # Creat file and read file in guest.
    logger.info("check point2: creat and read dirctory/file in guest")
    if utils.create_dir(ipaddr, "root", "redhat") == 0:
        logger.info("create dir - /tmp/test successfully")
        if utils.write_file(ipaddr, "root", "redhat") == 0:
            logger.info("write and read file: /tmp/test/test.log successfully")
        else:
            logger.error("Error: fail to write/read file - /tmp/test/test.log")
            Test_Result = 1
            return Test_Result
    else:
        logger.error("Error: fail to create dir - /tmp/test")
        Test_Result = 1
        return Test_Result

    # Check whether vcpu equals the value set in geust config xml
    logger.info("check point3: check cpu number in guest equals to \
the value set in domain config xml")
    vcpunum_expect = int(utils.get_num_vcpus(domain_name))
    logger.info("vcpu number in domain config xml - %s is %s" %
                (domain_name, vcpunum_expect))
    vcpunum_actual = int(utils.get_remote_vcpus(ipaddr, "root", "redhat"))
    logger.info("The actual vcpu number in guest - %s is %s" %
                (domain_name, vcpunum_actual))
    if vcpunum_expect == vcpunum_actual:
        logger.info("The actual vcpu number in guest is \
equal to the setting your domain config xml")
    else:
        logger.error("Error: The actual vcpu number in guest is \
NOT equal to the setting your domain config xml")
        Test_Result = 1
        return Test_Result

    # Check whether mem in guest is equal to the value set in domain config xml
    logger.info("check point4: check whether mem in guest is equal to \
the value set in domain config xml")
    mem_expect = utils.get_size_mem(domain_name)
    logger.info("current mem size in domain config xml - %s is %s" %
                (domain_name, mem_expect))
    mem_actual = utils.get_remote_memory(ipaddr, "root", "redhat")
    logger.info("The actual mem size in guest - %s is %s" %
                (domain_name, mem_actual))
    # Allow up to 7% deviation between configured and observed memory.
    diff_range = int(mem_expect) * 0.07
    diff = int(mem_expect) - int(mem_actual)
    if int(math.fabs(diff)) < int(diff_range):
        logger.info("The actual mem size in guest is almost equal to \
the setting your domain config xml")
    else:
        logger.error("Error: The actual mem size in guest is NOT equal to \
the setting your domain config xml")
        Test_Result = 1
        return Test_Result

    # Check app works fine in guest, such as: wget
    logger.info("check point5: check app works fine in guest, such as: wget")
    logger.info("get system environment information")
    envfile = os.path.join(HOME_PATH, 'global.cfg')
    logger.info("the environment file is %s" % envfile)
    envparser = env_parser.Envparser(envfile)
    file_url = envparser.get_value("other", "wget_url")
    if utils.run_wget_app(ipaddr, "root", "redhat", file_url, logger) == 0:
        logger.info("run wget successfully in guest.")
    else:
        logger.error("Error: fail to run wget in guest")
        Test_Result = 1
        return Test_Result

    # Check nic and blk driver in guest
    if 'kvm' in virt_type or 'xenfv' in virt_type:
        logger.info("check point6: check nic and blk driver in guest is \
expected as your config:")
        if utils.validate_remote_nic_type(ipaddr, "root", "redhat",
                                          nic_type, logger) == 0 and \
           utils.validate_remote_blk_type(ipaddr, "root", "redhat",
                                          blk_type, logger) == 0:
            logger.info("nic type - %s and blk type - %s check successfully" %
                        (nic_type, blk_type))
        else:
            logger.error("Error: nic type - %s or blk type - %s check failed" %
                         (nic_type, blk_type))
            Test_Result = 1
            return Test_Result

    return Test_Result
def reboot(params):
    """Reboot virtual machine

    Reboots the guest, then uses ping against the guest's IP to watch
    it go down and come back up within the timeout.

    Return 0 on SUCCESS or 1 on FAILURE
    """
    # Initiate and check parameters
    global logger
    logger = params['logger']
    params.pop('logger')
    domain_name = params['guestname']

    # Connect to local hypervisor connection URI
    hypervisor = utils.get_hypervisor()
    if hypervisor == "kvm":
        logger.info("kvm hypervisor doesn't support the funtion now")
        return 0

    conn = sharedmod.libvirtobj['conn']
    domobj = conn.lookupByName(domain_name)

    # Get domain ip
    logger.info("get the mac address of vm %s" % domain_name)
    mac = utils.get_dom_mac_addr(domain_name)
    logger.info("the mac address of vm %s is %s" % (domain_name, mac))
    logger.info("get ip by mac address")
    ip = utils.mac_to_ip(mac, 180)
    logger.info("the ip address of vm %s is %s" % (domain_name, ip))

    timeout = 600
    logger.info('reboot vm %s now' % domain_name)

    # Reboot domain
    try:
        # Fix: the original did `domobj = reboot(0)`, recursively calling
        # this test function; the intent is to reboot the domain object.
        domobj.reboot(0)
    except libvirtError as e:
        # Fix: py2-only e.message replaced with the libvirt accessor.
        logger.error("API error message: %s, error code is %s"
                     % (e.get_error_message(), e.get_error_code()))
        logger.error("fail to reboot domain")
        return 1

    logger.info("the vm %s is power off" % domain_name)

    # Check domain status by ping ip: wait for the guest to stop answering.
    while timeout:
        time.sleep(10)
        timeout -= 10
        if utils.do_ping(ip, 0):
            logger.info(str(timeout) + "s left")
        else:
            logger.info("vm %s power off successfully" % domain_name)
            break
    if timeout == 0:
        logger.info("fail to power off %s" % domain_name)
        return 1

    timeout = 600
    logger.info("the vm %s is power on" % domain_name)

    # Wait for the guest to answer ping again after the reboot.
    while timeout:
        time.sleep(10)
        timeout -= 10
        if not utils.do_ping(ip, 0):
            logger.info(str(timeout) + "s left")
        else:
            # Fix: the original omitted `% domain_name`, logging a
            # literal "%s" placeholder.
            logger.info("vm %s power on successfully" % domain_name)
            break
    if timeout == 0:
        logger.info("fail to power on vm %s" % domain_name)
        return 1

    return 0