def vcpu_affinity_check(domain_name, vcpu, expected_pinned_cpu, hypervisor):
    """check the task in the process of the running virtual machine
       grep Cpus_allowed_list /proc/PID/task/*/status
    """
    host_kernel_version = utils.get_host_kernel_version()

    if 'qemu' in hypervisor:
        get_pid_cmd = "cat /var/run/libvirt/qemu/%s.pid" % domain_name
        status, pid = commands.getstatusoutput(get_pid_cmd)
        if status:
            logger.error("failed to get the pid of "
                         "the running virtual machine process")
            return 1

        if 'el6' in host_kernel_version:
            cmd_get_task_list = "grep Cpus_allowed_list /proc/%s/task/*/status" % pid
            status, output = commands.getstatusoutput(cmd_get_task_list)
            logger.debug("the output of command 'grep Cpus_allowed_list "
                         "/proc/%s/task/*/status' is %s" % (pid, output))
            task_list = output.split('\n')[1:]
            vcpu_task = task_list[int(vcpu)]
            actual_pinned_cpu = int(vcpu_task.split('\t')[1])
        elif 'el5' in host_kernel_version:
            cmd_get_task_list = "grep Cpus_allowed /proc/%s/task/*/status" % pid
            status, output = commands.getstatusoutput(cmd_get_task_list)
            logger.debug("the output of command 'grep Cpus_allowed "
                         "/proc/%s/task/*/status' is %s" % (pid, output))
            task_list = output.split('\n')[2:]
            vcpu_task = task_list[int(vcpu)]
            tmp = int(vcpu_task.split('\t')[1].split(',')[-1])
            actual_pinned_cpu = math.log(tmp, 2)
        else:
            logger.error("unsupported host os version: %s" % host_kernel_version)
            return 1
    elif 'xen' in hypervisor:
        get_expected_pinned_cpu_cmd = (
            "virsh vcpuinfo %s|grep -1 ^VCPU.*[^0-9]%s$|tail -1|cut -d: -f2"
            % (domain_name, vcpu))
        status, actual_pinned_cpu_str = commands.getstatusoutput(
            get_expected_pinned_cpu_cmd)
        actual_pinned_cpu = int(actual_pinned_cpu_str)
    else:
        logger.info("unsupported hypervisor type: %s" % hypervisor)
        return 1

    logger.info("the actual pinned cpu is %s" % actual_pinned_cpu)

    shell_cmd = "virsh vcpuinfo %s" % domain_name
    (status, text) = commands.getstatusoutput(shell_cmd)
    logger.debug("after pinning, the vcpu status is %s" % text)

    if actual_pinned_cpu == expected_pinned_cpu:
        logger.info("actual_pinned_physical_cpu is "
                    "equal to expected_pinned_physical_cpu")
        return 0
    else:
        logger.info("actual_pinned_physical_cpu is "
                    "not equal to expected_pinned_physical_cpu")
        return 1
def reattach(params):
    """Reattach a specific node device and remove it from the pci-stub driver;
       argument 'params' is a dictionary that includes a 'pciaddress' key,
       whose value uniquely identifies the pci address of the node device
    """
    global logger
    logger = params['logger']
    pciaddress = params['pciaddress']

    original_driver = check_node_reattach(pciaddress)
    logger.info("original device driver: %s" % original_driver)

    kernel_version = utils.get_host_kernel_version()
    hypervisor = utils.get_hypervisor()
    pciback = ''
    if hypervisor == 'kvm':
        pciback = 'pci-stub'
    if hypervisor == 'xen':
        pciback = 'pciback'

    if 'el5' in kernel_version:
        vendor_product_get = "lspci -n |grep %s|awk '{print $3}'" % pciaddress
        logger.debug("the command to get vendor:product ID: %s" % vendor_product_get)
        (status, retval) = commands.getstatusoutput(vendor_product_get)
        if status != 0:
            logger.error("failed to get vendor product ID")
            return 1
        else:
            vendor_ID = retval.split(":")[0]
            product_ID = retval.split(":")[1]
            device_name = "pci_%s_%s" % (vendor_ID, product_ID)
    else:
        (bus, slot_func) = pciaddress.split(":")
        (slot, func) = slot_func.split(".")
        device_name = "pci_0000_%s_%s_%s" % (bus, slot, func)

    logger.debug("the name of the pci device is: %s" % device_name)

    conn = sharedmod.libvirtobj['conn']
    try:
        nodeobj = conn.nodeDeviceLookupByName(device_name)
        nodeobj.reAttach()
        logger.info("reattach the node device")
        current_driver = check_node_reattach(pciaddress)
        logger.info("current device driver: %s" % current_driver)
        if original_driver == pciback and current_driver != pciback:
            logger.info("node device %s reattach succeeded" % device_name)
        else:
            logger.info("node device %s reattach failed" % device_name)
            return 1
    except libvirtError as e:
        logger.error("API error message: %s, error code is %s"
                     % (e.message, e.get_error_code()))
        return 1

    return 0
def detach(params):
    """Detach a specific node device and bind it to the pci-stub driver;
       argument 'params' is a dictionary that includes a 'pciaddress' key,
       whose value uniquely identifies the pci address of the node device
    """
    global logger
    logger = params['logger']
    pciaddress = params['pciaddress']

    original_driver = check_node_detach(pciaddress)
    logger.info("original device driver: %s" % original_driver)

    kernel_version = utils.get_host_kernel_version()
    hypervisor = utils.get_hypervisor()
    pciback = ''
    if hypervisor == 'kvm':
        pciback = 'pci-stub'
    if hypervisor == 'xen':
        pciback = 'pciback'

    if 'el5' in kernel_version:
        vendor_product_get = "lspci -n |grep %s|awk '{print $3}'" % pciaddress
        logger.debug("the command to get vendor:product ID: %s" % vendor_product_get)
        (status, retval) = commands.getstatusoutput(vendor_product_get)
        if status != 0:
            logger.error("failed to get vendor product ID")
            return 1
        else:
            vendor_ID = retval.split(":")[0]
            product_ID = retval.split(":")[1]
            device_name = "pci_%s_%s" % (vendor_ID, product_ID)
    else:
        (bus, slot_func) = pciaddress.split(":")
        (slot, func) = slot_func.split(".")
        device_name = "pci_0000_%s_%s_%s" % (bus, slot, func)

    logger.debug("the name of the pci device is: %s" % device_name)

    conn = sharedmod.libvirtobj['conn']
    try:
        nodeobj = conn.nodeDeviceLookupByName(device_name)
        logger.info("detach the node device")
        # dettach() is the libvirt API spelling (virNodeDeviceDettach)
        nodeobj.dettach()
        current_driver = check_node_detach(pciaddress)
        logger.info("current device driver: %s" % current_driver)
        if current_driver != original_driver and current_driver == pciback:
            logger.info("node device %s detach succeeded" % device_name)
        else:
            logger.info("node device %s detach failed" % device_name)
            return 1
    except libvirtError as e:
        logger.error("API error message: %s, error code is %s"
                     % (e.message, e.get_error_code()))
        return 1

    return 0
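# Illustrative sketch (not part of the original suite): how the detach()
# and reattach() test cases above might be driven directly.  The PCI
# address '00:19.0' and the logger name are assumed example values; a
# real run still relies on sharedmod.libvirtobj['conn'] being populated
# by the test framework.
def _example_detach_reattach_cycle():
    import logging
    params = {
        'logger': logging.getLogger('libvirt-test-api'),
        'pciaddress': '00:19.0',  # hypothetical host device address
    }
    # bind the device to pci-stub/pciback, then give it back to the host driver
    if detach(params):
        return 1
    return reattach(params)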
def vcpu_affinity_check(domain_name, vcpu, expected_pinned_cpu, hypervisor):
    """check the task in the process of the running virtual machine
       grep Cpus_allowed_list /proc/PID/task/*/status
    """
    host_kernel_version = utils.get_host_kernel_version()

    if 'qemu' in hypervisor:
        get_pid_cmd = "cat /var/run/libvirt/qemu/%s.pid" % domain_name
        status, pid = commands.getstatusoutput(get_pid_cmd)
        if status:
            logger.error("failed to get the pid of "
                         "the running virtual machine process")
            return 1

        if 'el6' in host_kernel_version or 'el7' in host_kernel_version:
            cmd_vcpu_task_id = ("virsh qemu-monitor-command %s --hmp info cpus"
                                "|grep '#%s'|cut -d '=' -f3"
                                % (domain_name, vcpu))
            status, output = commands.getstatusoutput(cmd_vcpu_task_id)
            vcpu_task_id = output[:output.find("^")]
            logger.debug("vcpu task id: %s" % vcpu_task_id)
            cmd_get_task_list = ("grep Cpus_allowed_list /proc/%s/task/%s/status"
                                 % (pid, vcpu_task_id))
            status, output = commands.getstatusoutput(cmd_get_task_list)
            logger.debug("the output of command 'grep Cpus_allowed_list "
                         "/proc/%s/task/%s/status' is %s"
                         % (pid, vcpu_task_id, output))
            actual_pinned_cpu = int(output.split('\t')[1])
        elif 'el5' in host_kernel_version:
            cmd_get_task_list = "grep Cpus_allowed /proc/%s/task/*/status" % pid
            status, output = commands.getstatusoutput(cmd_get_task_list)
            logger.debug("the output of command 'grep Cpus_allowed "
                         "/proc/%s/task/*/status' is %s" % (pid, output))
            task_list = output.split('\n')[2:]
            vcpu_task = task_list[int(vcpu)]
            tmp = int(vcpu_task.split('\t')[1].split(',')[-1])
            actual_pinned_cpu = math.log(tmp, 2)
        else:
            logger.error("unsupported host os version: %s" % host_kernel_version)
            return 1
    elif 'xen' in hypervisor:
        get_expected_pinned_cpu_cmd = (
            "virsh vcpuinfo %s|grep -1 ^VCPU.*[^0-9]%s$|tail -1|cut -d: -f2"
            % (domain_name, vcpu))
        status, actual_pinned_cpu_str = commands.getstatusoutput(
            get_expected_pinned_cpu_cmd)
        actual_pinned_cpu = int(actual_pinned_cpu_str)
    else:
        logger.info("unsupported hypervisor type: %s" % hypervisor)
        return 1

    logger.info("the actual pinned cpu is %s" % actual_pinned_cpu)

    shell_cmd = "virsh vcpuinfo %s" % domain_name
    (status, text) = commands.getstatusoutput(shell_cmd)
    logger.debug("after pinning, the vcpu status is %s" % text)

    if actual_pinned_cpu == expected_pinned_cpu:
        logger.info("actual_pinned_physical_cpu is "
                    "equal to expected_pinned_physical_cpu")
        return 0
    else:
        logger.info("actual_pinned_physical_cpu is "
                    "not equal to expected_pinned_physical_cpu")
        return 1
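# Illustrative sketch (not from the original source): checking that vCPU 0
# of a guest ended up pinned to physical CPU 2 after a 'virsh vcpupin' call.
# The domain name, vCPU index, and expected CPU are assumed example values;
# vcpu_affinity_check() returns 0 on a match and 1 otherwise.
def _example_vcpu_affinity_check():
    domain_name = 'demo-guest'     # hypothetical running qemu/KVM guest
    vcpu = 0                       # vCPU index to inspect
    expected_pinned_cpu = 2        # physical CPU it was pinned to
    return vcpu_affinity_check(domain_name, vcpu, expected_pinned_cpu, 'qemu')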
def reset(params):
    """Reset a specific node device, returning it to a clean and known state"""
    global logger
    logger = params['logger']
    pciaddress = params['pciaddress']

    kernel_version = utils.get_host_kernel_version()

    if 'el5' in kernel_version:
        vendor_product_get = "lspci -n |grep %s|awk '{print $3}'" % pciaddress
        logger.debug("the command to get vendor:product ID: %s" % vendor_product_get)
        (status, retval) = commands.getstatusoutput(vendor_product_get)
        if status != 0:
            logger.error("failed to get vendor product ID")
            return 1
        else:
            vendor_ID = retval.split(":")[0]
            product_ID = retval.split(":")[1]
            device_name = "pci_%s_%s" % (vendor_ID, product_ID)
    else:
        (bus, slot_func) = pciaddress.split(":")
        (slot, func) = slot_func.split(".")
        device_name = "pci_0000_%s_%s_%s" % (bus, slot, func)

    conn = sharedmod.libvirtobj['conn']
    try:
        nodeobj = conn.nodeDeviceLookupByName(device_name)
        nodeobj.reset()
        logger.info("reset the node device")
        logger.info("node device %s reset succeeded" % device_name)
    except libvirtError as e:
        logger.error("API error message: %s, error code is %s"
                     % (e.message, e.get_error_code()))
        logger.error("failed to reset node device %s" % device_name)
        return 1

    return 0
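# Illustrative sketch (not part of the original suite): reset() follows the
# same params convention as detach()/reattach() above, so a minimal call only
# needs a logger and the PCI address.  The values here are assumed examples.
def _example_reset_call():
    import logging
    return reset({'logger': logging.getLogger('libvirt-test-api'),
                  'pciaddress': '00:19.0'})  # hypothetical PCI address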
test_log = 'test.log'
case_dir = 'ali-cases'
suites = [
    'vm_lifecycle', 'boot', 'vcpu', 'memory', 'disk', 'network',
    'reboot', 'multiqueue', 'cpu_feature', 'gshell'
]
#suites = ['memory', 'multiqueue', 'network']
run_cmd = 'sudo %s libvirt-test-api -c %s -f %s 2>&1 | tee -a %s'
default_proc_num = 3
test_type = 'FT'
web_ip = '11.238.144.40'
web_port = '80'
upload_url = 'http://%s:%s/api/report/function_test' % (web_ip, web_port)
token = 'virt-api-key'

kernel = utils.get_host_kernel_version()
if kernel.startswith('2.6.32'):
    python = '/usr/local/python/bin/python'
else:
    # always use the houyi python
    python = '/usr/local/python/bin/python'


def add_remove(tlist, opt_list):
    '''add/remove item in tlist.
       opt_list is a list like ['+ts5', '-ts2'] or ['+tc5', '-tc3'].
    '''
    for i in opt_list:
        i = i.strip()
        if i.startswith('+'):