def module_load_unload(self, mod1):
    """
    Unloading and loading the given module
    """
    if linux_modules.module_is_loaded(mod1) is False:
        linux_modules.load_module(mod1)
        time.sleep(self.load_unload_sleep_time)
    sub_mod = linux_modules.get_submodules(mod1)
    if sub_mod:
        for mod in sub_mod:
            linux_modules.unload_module(mod)
            if linux_modules.module_is_loaded(mod) is True:
                self.error_modules.append(mod)
                break
    if linux_modules.unload_module(mod1) is False:
        self.fail("Unloading Module %s failed" % mod1)
    time.sleep(self.load_unload_sleep_time)
    cmd = "%s %s=%s" % (mod1, self.param_name, self.param_value)
    if linux_modules.load_module(cmd) is False:
        self.fail("Param %s = Value %s Failed for Module %s" %
                  (self.param_name, self.param_value, mod1))
    time.sleep(self.load_unload_sleep_time)
    if self.sysfs_chk:
        if self.sysfs_value_check() is False:
            self.fail("Sysfs check failed")
    if not wait.wait_for(configure_network.is_interface_link_up,
                         timeout=120, args=[self.ifaces]):
        self.fail("Link up of interface is taking longer than 120s")
    if not configure_network.ping_check(self.ifaces, self.peer, '1000',
                                        flood=True):
        self.fail("ping test failed")
def setUp(self):
    """
    Sets up NVMf configuration
    """
    self.nss = self.params.get('namespaces', default='')
    self.peer_ips = self.params.get('peer_ips', default='')
    if not self.nss or not self.peer_ips:
        self.cancel("No inputs provided")
    self.peer_user = self.params.get("peer_user", default="root")
    self.peer_password = self.params.get("peer_password", default=None)
    self.nss = self.nss.split(' ')
    self.peer_ips = self.peer_ips.split(' ')
    self.ids = range(1, len(self.peer_ips) + 1)
    if len(self.nss) != len(self.peer_ips):
        self.cancel("Count of namespace and peer ips mismatch")
    smm = SoftwareManager()
    if not smm.check_installed("nvme-cli") and not \
            smm.install("nvme-cli"):
        self.cancel('nvme-cli is needed for the test to be run')
    try:
        if not linux_modules.module_is_loaded("nvme-rdma"):
            linux_modules.load_module("nvme-rdma")
    except CmdError:
        self.cancel("nvme-rdma module not loadable")
    self.cfg_tmpl = self.get_data("nvmf_template.cfg")
    dirname = os.path.dirname(os.path.abspath(self.cfg_tmpl))
    self.cfg_file = os.path.join(dirname, "nvmf.cfg")
    self.nvmf_discovery_file = "/etc/nvme/discovery.conf"
def setUp(self):
    """
    Setup the device.
    """
    if 'ppc' not in platform.processor():
        self.cancel("Processor is not ppc64")
    if os.path.exists('/proc/device-tree/bmc'):
        self.cancel("Test Unsupported! on this platform")
    if 'pSeries' in open('/proc/cpuinfo', 'r').read():
        for mdl in ['rpaphp', 'rpadlpar_io']:
            if not linux_modules.module_is_loaded(mdl):
                linux_modules.load_module(mdl)
    elif 'PowerNV' in open('/proc/cpuinfo', 'r').read():
        if not linux_modules.module_is_loaded("pnv_php"):
            linux_modules.load_module("pnv_php")
    self.dic = {}
    self.device = self.params.get('pci_devices', default="")
    self.peer_ip = self.params.get('peer_ip', default="")
    self.count = int(self.params.get('count', default='1'))
    if not self.device:
        self.cancel("PCI_address not given")
    self.device = self.device.split(" ")
    smm = SoftwareManager()
    if not smm.check_installed("pciutils") and not smm.install("pciutils"):
        self.cancel("pciutils package is need to test")
    for pci_addr in self.device:
        if not os.path.isdir('/sys/bus/pci/devices/%s' % pci_addr):
            self.cancel("%s not present in device path" % pci_addr)
        slot = pci.get_slot_from_sysfs(pci_addr)
        if not slot:
            self.cancel("slot number not available for: %s" % pci_addr)
        self.dic[pci_addr] = slot
def init_new(self):
    """
    Create new dbfile without any configuration.
    """
    self.pid_files_path = "/var/run/openvswitch"
    self.db_path = os.path.join(self.tmpdir, "conf.db")
    self.db_socket = os.path.join(self.pid_files_path, "db.sock")
    self.db_pidfile = utils_misc.get_pid_path("ovsdb-server",
                                              self.pid_files_path)
    self.ovs_pidfile = utils_misc.get_pid_path("ovs-vswitchd",
                                               self.pid_files_path)
    self.dbschema = "/usr/share/openvswitch/vswitch.ovsschema"
    self.cleanup = True
    sm = factory(ServiceManager)()
    # Stop system openvswitch
    try:
        sm.stop("openvswitch")
    except process.CmdError:
        pass
    linux_modules.load_module("openvswitch")
    self.clean()
    if os.path.exists(self.db_path):
        os.remove(self.db_path)
    self.init_db()
    self.start_ovs_vswitchd()
def setUp(self):
    """
    get parameters
    """
    self.module = self.params.get('module', default=None)
    self.param_name = self.params.get('module_param_name', default=None)
    self.param_value = self.params.get('module_param_value', default=None)
    self.mpath_enabled = self.params.get('multipath_enabled', default=False)
    self.disk = self.params.get('disk', default=None)
    self.load_unload_sleep_time = 30
    self.error_modules = []
    self.uname = linux_modules.platform.uname()[2]
    if not self.module:
        self.cancel("Please provide the Module name")
    if not self.disk:
        self.cancel("Please provide the Disk name")
    if linux_modules.module_is_loaded(self.module) is False:
        linux_modules.load_module(self.module)
        time.sleep(self.load_unload_sleep_time)
    if self.built_in_module(self.module) is True:
        self.cancel("Module %s is Built-in Skipping " % self.module)
    if self.param_check() is False:
        self.cancel("Param %s is not Valid for Module %s" %
                    (self.param_name, self.module))
def setUp(self):
    """
    Sets up NVMf configuration
    """
    self.nss = self.params.get('namespaces', default='')
    self.peer_ips = self.params.get('peer_ips', default='')
    if not self.nss or not self.peer_ips:
        self.cancel("No inputs provided")
    self.nss = self.nss.split(',')
    self.peer_ips = self.peer_ips.split(',')
    self.ids = range(1, len(self.peer_ips) + 1)
    if len(self.nss) != len(self.peer_ips):
        self.cancel("Count of namespace and peer ips mismatch")
    smm = SoftwareManager()
    if not smm.check_installed("nvme-cli") and not \
            smm.install("nvme-cli"):
        self.cancel('nvme-cli is needed for the test to be run')
    try:
        if not linux_modules.module_is_loaded("nvme-rdma"):
            linux_modules.load_module("nvme-rdma")
    except CmdError:
        self.cancel("nvme-rdma module not loadable")
    self.cfg_tmpl = self.get_data("nvmf_template.cfg")
    self.cfg_file = self.get_data("nvmf.cfg")
    self.nvmf_discovery_file = "/etc/nvme/discovery.conf"
def setUp(self):
    """
    Setup the device.
    """
    if 'ppc' not in platform.processor():
        self.cancel("Processor is not ppc64")
    if os.path.exists('/proc/device-tree/bmc'):
        self.cancel("Test Unsupported! on this platform")
    if cpu._list_matches(open('/proc/cpuinfo').readlines(),
                         'platform\t: pSeries\n'):
        self.power_vm = True
        for mdl in ['rpaphp', 'rpadlpar_io']:
            if not linux_modules.module_is_loaded(mdl):
                linux_modules.load_module(mdl)
    elif cpu._list_matches(open('/proc/cpuinfo').readlines(),
                           'platform\t: PowerNV\n'):
        self.power_vm = False
        if not linux_modules.module_is_loaded("pnv_php"):
            linux_modules.load_module("pnv_php")
    self.dic = {}
    self.device = self.params.get('pci_devices', default=' ').split(",")
    self.count = int(self.params.get('count', default='1'))
    if not self.device:
        self.cancel("PCI_address not given")
    for pci_addr in self.device:
        if not os.path.isdir('/sys/bus/pci/devices/%s' % pci_addr):
            self.cancel("%s not present in device path" % pci_addr)
        slot = self.get_slot(pci_addr)
        if not slot:
            self.cancel("slot number not available for: %s" % pci_addr)
        self.dic[pci_addr] = slot
def tearDown(self):
    """
    Restore back the default Parameters
    """
    self.log.info("Restoring Default param")
    linux_modules.unload_module(self.module)
    linux_modules.load_module(self.module)
    time.sleep(self.load_unload_sleep_time)
    if linux_modules.module_is_loaded(self.module) is False:
        self.fail("Cannot restore default values for Module : %s"
                  % self.module)
def tearDown(self):
    """
    Restore back the default Parameters
    """
    self.log.info("Restoring Default param")
    if self.mpath_enabled is True:
        if not wait.wait_for(self.is_mpath_flushed, timeout=90):
            self.fail("multipath is in USE and cannot be flushed")
    linux_modules.unload_module(self.module)
    linux_modules.load_module(self.module)
    time.sleep(self.load_unload_sleep_time)
    if linux_modules.module_is_loaded(self.module) is False:
        self.fail("Cannot restore default values for Module : %s"
                  % self.module)
    self.log.info("Restore of default param is success")
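# A minimal, self-contained sketch of the unload/reload pattern the two
# tearDown() examples above rely on, built only from the
# avocado.utils.linux_modules helpers already used throughout these
# snippets. The retry count and settle time are illustrative assumptions,
# not values taken from the tests above.
import time

from avocado.utils import linux_modules


def reload_module_defaults(module, settle=5, retries=3):
    """Unload a module and load it back without any parameters.

    Returns True once the module is reported as loaded again.
    """
    if linux_modules.module_is_loaded(module):
        linux_modules.unload_module(module)
    for _ in range(retries):
        linux_modules.load_module(module)
        time.sleep(settle)  # give driver initialization a moment to settle
        if linux_modules.module_is_loaded(module):
            return True
    return False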
def init_system(self):
    """
    Create new dbfile without any configuration.
    """
    sm = factory(ServiceManager)()
    try:
        if linux_modules.load_module("openvswitch"):
            sm.restart("openvswitch")
    except process.CmdError:
        logging.error("Service OpenVSwitch is probably not"
                      " installed in system.")
        raise
    self.pid_files_path = "/var/run/openvswitch/"
def setUp(self):
    """
    Check pre-requisites before running sensors command
    Testcase should be executed only on bare-metal environment.
    """
    s_mg = SoftwareManager()
    d_distro = distro.detect()
    if d_distro.name == "Ubuntu":
        if not s_mg.check_installed("lm-sensors") and not s_mg.install(
                "lm-sensors"):
            self.cancel('Need sensors to run the test')
    elif d_distro.name == "SuSE":
        if not s_mg.check_installed("sensors") and not s_mg.install(
                "sensors"):
            self.cancel('Need sensors to run the test')
    else:
        if not s_mg.check_installed("lm_sensors") and not s_mg.install(
                "lm_sensors"):
            self.cancel('Need sensors to run the test')
    if d_distro.arch in ["ppc64", "ppc64le"]:
        if not cpu._list_matches(open('/proc/cpuinfo').readlines(),
                                 'platform\t: PowerNV\n'):
            self.cancel(
                'sensors test is applicable to bare-metal environment.')
        config_check = linux_modules.check_kernel_config(
            'CONFIG_SENSORS_IBMPOWERNV')
        if config_check == 0:
            self.cancel('Config is not set')
        elif config_check == 1:
            if linux_modules.load_module('ibmpowernv'):
                if linux_modules.module_is_loaded('ibmpowernv'):
                    self.log.info('Module Loaded Successfully')
                else:
                    self.cancel('Module Loading Failed')
        else:
            self.log.info('Module is Built In')
    if not d_distro.name == "Ubuntu":
        try:
            process.run('service lm_sensors stop', sudo=True)
            process.run('service lm_sensors start', sudo=True)
            process.run('service lm_sensors status', sudo=True)
        except process.CmdError:
            self.error(
                'Starting Service Failed. Make sure module is loaded')
    cmd = "yes | sudo sensors-detect"
    det_op = process.run(cmd, shell=True, ignore_status=True).stdout
    if 'no sensors were detected' in det_op:
        self.cancel('No sensors found to test !')
def setUp(self):
    """
    Setup the device.
    """
    cmd = "uname -p"
    if 'ppc' not in process.system_output(cmd, ignore_status=True):
        self.cancel("Processor is not ppc64")
    cmd = "cat /proc/cpuinfo"
    if cpu._list_matches(open('/proc/cpuinfo').readlines(),
                         'platform\t: pSeries\n'):
        PowerVM = True
        for mdl in ['rpaphp', 'rpadlpar_io']:
            if not linux_modules.module_is_loaded(mdl):
                linux_modules.load_module(mdl)
    elif cpu._list_matches(open('/proc/cpuinfo').readlines(),
                           'platform\t: PowerNV\n'):
        PowerVM = False
        if not linux_modules.module_is_loaded("pnv_php"):
            linux_modules.load_module("pnv_php")
    self.return_code = 0
    self.device = self.params.get('pci_device', default=' ')
    self.num_of_hotplug = int(self.params.get('num_of_hotplug', default='1'))
    if not os.path.isdir('/sys/bus/pci/devices/%s' % self.device):
        self.cancel("PCI device given does not exist")
    if PowerVM:
        devspec = genio.read_file("/sys/bus/pci/devices/%s/devspec"
                                  % self.device)
        self.slot = genio.read_file("/proc/device-tree/%s/ibm,loc-code"
                                    % devspec)
        self.slot = re.match(r'((\w+)[\.])+(\w+)-P(\d+)-C(\d+)|Slot(\d+)',
                             self.slot).group()
    else:
        self.slot = pci.get_pci_prop(self.device, "PhySlot")
    if not os.path.isdir('/sys/bus/pci/slots/%s' % self.slot):
        self.cancel("%s Slot not available" % self.slot)
    if not os.path.exists('/sys/bus/pci/slots/%s/power' % self.slot):
        self.cancel("%s Slot does not support hotplug" % self.slot)
def setUp(self):
    """
    Setup the device.
    """
    if 'ppc' not in platform.processor():
        self.cancel("Processor is not ppc64")
    if os.path.exists('/proc/device-tree/bmc'):
        self.cancel("Test Unsupported! on this platform")
    cmd = "cat /proc/cpuinfo"
    if cpu._list_matches(open('/proc/cpuinfo').readlines(),
                         'platform\t: pSeries\n'):
        PowerVM = True
        for mdl in ['rpaphp', 'rpadlpar_io']:
            if not linux_modules.module_is_loaded(mdl):
                linux_modules.load_module(mdl)
    elif cpu._list_matches(open('/proc/cpuinfo').readlines(),
                           'platform\t: PowerNV\n'):
        PowerVM = False
        if not linux_modules.module_is_loaded("pnv_php"):
            linux_modules.load_module("pnv_php")
    self.return_code = 0
    self.device = self.params.get('pci_device', default=' ')
    self.num_of_hotplug = int(self.params.get('num_of_hotplug', default='1'))
    if not os.path.isdir('/sys/bus/pci/devices/%s' % self.device):
        self.cancel("PCI device given does not exist")
    if PowerVM:
        devspec = genio.read_file("/sys/bus/pci/devices/%s/devspec"
                                  % self.device)
        self.slot = genio.read_file("/proc/device-tree/%s/ibm,loc-code"
                                    % devspec)
        self.slot = re.match(r'((\w+)[\.])+(\w+)-P(\d+)-C(\d+)|Slot(\d+)',
                             self.slot).group()
    else:
        self.slot = pci.get_pci_prop(self.device, "PhySlot")
    if not os.path.isdir('/sys/bus/pci/slots/%s' % self.slot):
        self.cancel("%s Slot not available" % self.slot)
    if not os.path.exists('/sys/bus/pci/slots/%s/power' % self.slot):
        self.cancel("%s Slot does not support hotplug" % self.slot)
def init_new(self):
    """
    Create new dbfile without any configuration.
    """
    self.db_path = os.path.join(self.tmpdir, "conf.db")
    self.db_socket = os.path.join(self.tmpdir, "db.sock")
    self.db_pidfile = utils_misc.get_pid_path("ovsdb-server")
    self.ovs_pidfile = utils_misc.get_pid_path("ovs-vswitchd")
    self.dbschema = "/usr/share/openvswitch/vswitch.ovsschema"
    self.cleanup = True
    sm = ServiceManager()
    # Stop system openvswitch
    try:
        sm.stop("openvswitch")
    except process.CmdError:
        pass
    linux_modules.load_module("openvswitch")
    self.clean()
    if os.path.exists(self.db_path):
        os.remove(self.db_path)
    self.init_db()
    self.start_ovs_vswitchd()
def module_load_unload(self, module_list):
    """
    Unloading and loading the given module
    """
    for mod1 in module_list:
        if linux_modules.module_is_loaded(mod1) is False:
            linux_modules.load_module(mod1)
            time.sleep(self.load_unload_sleep_time)
    for mdl in module_list:
        for _ in range(0, self.iteration):
            sub_mod = self.get_depend_modules(mdl)
            if sub_mod:
                for mod in sub_mod.split(' '):
                    if mod == 'multipath':
                        if self.flush_mpath(mdl) is False:
                            self.error_modules.append(mdl)
                            break
                    else:
                        self.log.info("unloading sub module %s " % mod)
                        linux_modules.unload_module(mod)
                        if linux_modules.module_is_loaded(mod) is True:
                            self.error_modules.append(mod)
                            break
            self.log.info("unloading module %s " % mdl)
            linux_modules.unload_module(mdl)
            time.sleep(self.load_unload_sleep_time)
            if linux_modules.module_is_loaded(mdl) is True:
                self.error_modules.append(mdl)
                break
            self.log.info("loading module : %s " % mdl)
            linux_modules.load_module(mdl)
            time.sleep(self.load_unload_sleep_time)
            if linux_modules.module_is_loaded(mdl) is False:
                self.error_modules.append(mdl)
                break
def check_change_startuppolicy_cdrom_backend(vm, params, origin_device_xml, test):
    """
    Check live update cdrom with new source type and startupPolicy BZ2003644

    :param vm: one object representing VM
    :param params: wrapped parameters in dictionary format
    :param origin_device_xml: original device xml before updated
    :param test: test assert object
    """
    # Create block type cdrom disk, and update VM with it
    if 'startupPolicy' in params:
        params.pop('startupPolicy')
    origin_first_iso_path = params.get("virt_disk_device_source")
    # Load module and get scsi disk name
    utils_misc.wait_for(
        lambda: linux_modules.load_module("scsi_debug lbpu=1 lbpws=1"),
        timeout=10, ignore_errors=True)
    scsi_disk = process.run("lsscsi|grep scsi_debug|"
                            "awk '{print $6}'",
                            shell=True).stdout_text.strip()
    params.update({"virt_disk_device_source": scsi_disk})
    params.update({"type_name": "block"})
    iso_cdrom_disk_second = create_iso_cdrom_disk(params, create_iso=False)
    virsh.update_device(vm.name, iso_cdrom_disk_second.xml,
                        flagstr="--live", ignore_status=False, debug=True)
    vm.wait_for_login().close()
    # Cdrom should be updated
    check_source_in_cdrom_device(vm, scsi_disk, test)
    # Restore to original file based one
    virsh.update_device(vm.name, origin_device_xml.xml,
                        flagstr="--live", ignore_status=False, debug=True)
    vm.wait_for_login().close()
    # Cdrom should be restored
    check_source_in_cdrom_device(vm, origin_first_iso_path, test)
def test(self):
    """
    Runs rcutorture test for specified time.
    """
    seconds = 15
    os.chdir(self.logdir)
    if linux_modules.load_module("rcutorture"):
        self.cpus_toggle()
        time.sleep(seconds)
        self.cpus_toggle()
    linux_modules.unload_module("rcutorture")
    dmesg = process.system_output("dmesg")
    res = re.search(r"rcu-torture: Reader", dmesg, re.M | re.I)
    self.results = str(res).splitlines()
    # Runs log analysis on the dmesg logs and checks for known bugs
    pipe1 = [r for r in self.results if "!!! Reader Pipe:" in r]
    if len(pipe1) != 0:
        self.error("\nBUG: grace-period failure !")
    pipe2 = [r for r in self.results if "Reader Pipe" in r]
    for p in pipe2:
        nmiss = p.split(" ")[7]
        if int(nmiss):
            self.error("\nBUG: rcutorture tests failed !")
    batch = [s for s in self.results if "Reader Batch" in s]
    for b in batch:
        nmiss = b.split(" ")[7]
        if int(nmiss):
            self.log.info("\nWarning: near-miss failure !!")
def module_parameter_test(self):
    """
    Unloading and loading the given module
    """
    if self.mpath_enabled is True:
        if not wait.wait_for(self.is_mpath_flushed, timeout=90):
            self.fail("multipath is in USE and cannot be flushed")
    else:
        sub_mod = linux_modules.get_submodules(self.module)
        if sub_mod:
            for mod in sub_mod.split(' '):
                linux_modules.unload_module(mod)
                if linux_modules.module_is_loaded(mod) is True:
                    self.error_modules.append(mod)
                    break
    self.log.info("Testing %s=%s" % (self.param_name, self.param_value))
    self.log.info("unloading driver module: %s" % self.module)
    if linux_modules.unload_module(self.module) is False:
        self.fail("Unloading Module %s failed" % self.module)
    time.sleep(self.load_unload_sleep_time)
    self.log.info("loading driver with %s=%s" %
                  (self.param_name, self.param_value))
    cmd = "%s %s=%s" % (self.module, self.param_name, self.param_value)
    if linux_modules.load_module(cmd) is False:
        self.fail("Param %s = Value %s Failed for Module %s" %
                  (self.param_name, self.param_value, self.module))
    else:
        self.log.info("Driver module=%s loaded successfully" % cmd)
    self.log.info("checking sysfs for %s after successful load" % cmd)
    if self.sysfs_value_check() is False:
        self.fail("Sysfs check failed")
    self.log.info("sysfs check for %s success" % cmd)
    self.log.info("Running DD after %s changed" % cmd)
    if self.dd_run() is False:
        self.fail("dd run failed on disk: %s" % self.disk)
    self.log.info("DD run for %s is success" % cmd)
def run(test, params, env):
    """
    Hotplug/unhotplug virtio-vsock device

    1. Boot guest without virtio-vsock-pci device
    2. Hotplug virtio-vsock device
    3. Check device inside guest(lspci/dmesg)
    4. Transfer data from guest to host
    5. Unplug virtio-vsock device
    6. Cancel the vsock process on host
    7. Reboot guest

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    linux_modules.load_module('vhost_vsock')
    vm = env.get_vm(params['main_vm'])
    session = vm.wait_for_login()
    guest_cid = utils_vsock.get_guest_cid(3)
    vsock_id = 'hotplugged_vsock'
    vsock_params = {'id': vsock_id, 'guest-cid': guest_cid}
    vsock_test_tool = params["vsock_test_tool"]
    if '-mmio:' in params.get('machine_type'):
        dev_vsock = qdevices.QDevice('vhost-vsock-device', vsock_params)
    elif params.get('machine_type').startswith("s390"):
        vsock_params['devno'] = params.get('devno')
        dev_vsock = qdevices.QDevice("vhost-vsock-ccw", vsock_params)
    else:
        dev_vsock = qdevices.QDevice('vhost-vsock-pci', vsock_params)
    vm.devices.simple_hotplug(dev_vsock, vm.monitor)
    error_context.context('Check vsock device exist in guest lspci and '
                          'dmesg output.', logging.info)
    addr_pattern = params['addr_pattern']
    device_pattern = params['device_pattern']
    check_vsock_cmd = params.get('check_vsock_cmd', 'lspci')
    time.sleep(10)
    lspci_output = session.cmd_output(check_vsock_cmd)
    device_str = re.findall(r'%s\s%s' % (addr_pattern, device_pattern),
                            lspci_output)
    if params.get('dmesg_check') == 'yes':
        if not device_str:
            test.fail('check_vsock_cmd failed, no device "%s"' %
                      device_pattern)
        else:
            address = re.findall(addr_pattern, device_str[0])[0]
            chk_dmesg_cmd = 'dmesg'
            output = re.findall(address, session.cmd_output(chk_dmesg_cmd))
            if not output:
                test.fail('dmesg failed, no info related to %s' % address)
            else:
                error_msg = ''
                for o in output:
                    if re.search(r'fail|error', o, re.I):
                        error_msg += '%s' % o
                        break
                if error_msg:
                    test.fail("dmesg check failed: %s" % error_msg)
    # Transfer data from guest to host
    try:
        if vsock_test_tool == "nc_vsock":
            tool_bin = vsock_test.compile_nc_vsock(test, vm, session)
        if vsock_test_tool == "ncat":
            tool_bin = path.find_command("ncat")
        tmp_file = "/tmp/vsock_file_%s" % utils_misc.generate_random_string(6)
        rec_session = vsock_test.send_data_from_guest_to_host(
            session, tool_bin, guest_cid, tmp_file, file_size=10000)
        vsock_negative_test.check_data_received(test, rec_session, tmp_file)
        vm.devices.simple_unplug(dev_vsock, vm.monitor)
        vsock_negative_test.kill_host_receive_process(test, rec_session)
        vsock_test.check_guest_vsock_conn_exit(test, session)
    finally:
        session.cmd_output("rm -f %s" % tmp_file)
        session.close()
    vm.reboot()
def run(test, params, env):
    """
    Test domfstrim command, make sure that all supported options work well

    Test scenarios:
    1. fstrim without options
    2. fstrim with --minimum with large options
    3. fstrim with --minimum with small options

    Note: --mountpoint still not supported so will not test here
    """
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk, guest agent and scsi controller for guest

        :param: vm_name: Name of domain
        :param: scsi_disk: scsi_debug disk name
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_path = scsi_disk
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {'dev': disk_path}})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        find_scsi = "no"
        controllers = vmxml.xmltreefile.findall("devices/controller")
        for controller in controllers:
            if controller.get("type") == "scsi":
                find_scsi = "yes"
        vmxml.add_device(scsi_disk)
        # Add scsi disk controller
        if find_scsi == "no":
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            scsi_controller.model = "virtio-scsi"
            vmxml.add_device(scsi_controller)
        # Redefine guest
        vmxml.sync()

    if not virsh.has_help_command('domfstrim'):
        test.cancel("This version of libvirt does not support "
                    "the domfstrim test")
    try:
        utils_path.find_command("lsscsi")
    except utils_path.CmdNotFoundError:
        test.cancel("Command 'lsscsi' is missing. You must "
                    "install it.")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    status_error = ("yes" == params.get("status_error", "no"))
    minimum = params.get("domfstrim_minimum")
    mountpoint = params.get("domfstrim_mountpoint")
    options = params.get("domfstrim_options", "")
    is_fulltrim = ("yes" == params.get("is_fulltrim", "yes"))
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    has_qemu_ga = not ("yes" == params.get("no_qemu_ga", "no"))
    start_qemu_ga = not ("yes" == params.get("no_start_qemu_ga", "no"))
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Do backup for origin xml
    xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        vm = env.get_vm(vm_name)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        bef_list = session.cmd_output("fdisk -l|grep ^/dev|"
                                      "cut -d' ' -f1").split("\n")
        session.close()
        vm.destroy()

        # Load module and get scsi disk name
        linux_modules.load_module("scsi_debug lbpu=1 lbpws=1")
        time.sleep(5)
        scsi_disk = process.run("lsscsi|grep scsi_debug|"
                                "awk '{print $6}'",
                                shell=True).stdout_text.strip()
        # Create partition
        with open("/tmp/fdisk-cmd", "w") as cmd_file:
            cmd_file.write("n\np\n\n\n\nw\n")
        output = process.run("fdisk %s < /tmp/fdisk-cmd" % scsi_disk,
                             shell=True).stdout_text.strip()
        logging.debug("fdisk output %s", output)
        os.remove("/tmp/fdisk-cmd")
        # Format disk
        output = process.run("mkfs.ext3 %s1" % scsi_disk,
                             shell=True).stdout_text.strip()
        logging.debug("output %s", output)
        # Add scsi disk in guest
        recompose_xml(vm_name, scsi_disk)

        # Prepare guest agent and start guest
        if has_qemu_ga:
            vm.prepare_guest_agent(start=start_qemu_ga)
        else:
            # Remove qemu-ga channel
            vm.prepare_guest_agent(channel=has_qemu_ga, start=False)

        guest_session = vm.wait_for_login()
        # Get new generated disk
        af_list = guest_session.cmd_output("fdisk -l|grep ^/dev|"
                                           "cut -d' ' -f1").split('\n')
        new_disk = "".join(list(set(bef_list) ^ set(af_list)))
        # Mount disk in guest
        guest_session.cmd("mkdir -p /home/test && mount %s /home/test" %
                          new_disk)

        # Do first fstrim before all to get original map for compare
        cmd_result = virsh.domfstrim(vm_name)
        if cmd_result.exit_status != 0:
            if not status_error:
                test.fail("Fail to do virsh domfstrim, error %s" %
                          cmd_result.stderr)

        def get_diskmap_size():
            """
            Collect size from disk map

            :return: disk size
            """
            map_cmd = "cat /sys/bus/pseudo/drivers/scsi_debug/map"
            diskmap = process.run(map_cmd,
                                  shell=True).stdout_text.strip('\n\x00')
            sum = 0
            for i in diskmap.split(","):
                sum = sum + int(i.split("-")[1]) - int(i.split("-")[0])
            logging.debug("disk map (size:%d) is %s", sum, diskmap)
            return sum

        ori_size = get_diskmap_size()

        # Write data in disk
        dd_cmd = "dd if=/dev/zero of=/home/test/file bs=1048576 count=5; sync"
        guest_session.cmd(dd_cmd)

        def _full_mapped():
            """
            Do full map check

            :return: True or False
            """
            full_size = get_diskmap_size()
            return (ori_size < full_size)

        if not utils_misc.wait_for(_full_mapped, timeout=30):
            test.error("Scsi map is not updated after dd command.")
        full_size = get_diskmap_size()

        # Remove disk content in guest
        guest_session.cmd("rm -rf /home/test/*; sync")
        guest_session.close()

        def _trim_completed():
            """
            Do empty fstrim check

            :return: True or False
            """
            cmd_result = virsh.domfstrim(vm_name, minimum, mountpoint, options,
                                         unprivileged_user=unprivileged_user,
                                         uri=uri)
            if cmd_result.exit_status != 0:
                if not status_error:
                    test.fail("Fail to do virsh domfstrim, error %s" %
                              cmd_result.stderr)
                else:
                    logging.info("Fail to do virsh domfstrim as expected: %s",
                                 cmd_result.stderr)
                    return True
            empty_size = get_diskmap_size()
            logging.info("Trimmed disk to %d", empty_size)
            if is_fulltrim:
                return empty_size <= ori_size
            else:
                # For partly trim will check later
                return False

        if not utils_misc.wait_for(_trim_completed, timeout=30):
            # Get result again to check partly fstrim
            empty_size = get_diskmap_size()
            if not is_fulltrim:
                if ori_size < empty_size <= full_size:
                    logging.info("Success to do fstrim partly")
                    return True
            test.fail("Fail to do fstrim. (original size: %s), "
                      "(current size: %s), (full size: %s)" %
                      (ori_size, empty_size, full_size))
        logging.info("Success to do fstrim")
    finally:
        # Do domain recovery
        vm.shutdown()
        xml_backup.sync()
        linux_modules.unload_module("scsi_debug")
def run(test, params, env):
    """
    Test 802.1Q vlan of NIC among guests and host.

    1) Configure vlan interface over host bridge interface.
    2) Create two VMs over vlan interface.
    3) Load 8021q module in guest.
    4) Configure ip address of guest with 192.168.*.*
    5) Test by ping between guest and host, should fail.
    6) Test by ping between guests, should pass.
    7) Setup vlan in guests and using hard-coded ip address 192.168.*.*
    8) Test by ping between guest and host, should pass.
    9) Test by ping among guests, should pass.
    10) Test by netperf between guests and host.
    11) Test by netperf between guests.
    12) Delete vlan interface in host.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def add_vlan(interface, v_id, session=None):
        """
        Create a vlan-device on interface.

        :params interface: Interface.
        :params v_id: Vlan id.
        :params session: VM session or none.
        """
        vlan_if = '%s.%s' % (interface, v_id)
        add_cmd = "ip link add link %s name %s type " % (interface, vlan_if)
        add_cmd += "vlan id %s" % v_id
        error_context.context("Create vlan interface '%s' on %s" %
                              (vlan_if, interface), logging.info)
        if session:
            session.cmd(add_cmd)
        else:
            process.system(add_cmd)
        return vlan_if

    def set_ip_vlan(vlan_if, vlan_ip, session=None):
        """
        Set ip address of vlan interface.

        :params vlan_if: Vlan interface.
        :params vlan_ip: Vlan internal ip.
        :params session: VM session or none.
        """
        error_context.context("Assign IP '%s' to vlan interface '%s'" %
                              (vlan_ip, vlan_if), logging.info)
        if session:
            session.cmd("ifconfig %s 0.0.0.0" % vlan_if)
            session.cmd("ifconfig %s down" % vlan_if)
            session.cmd("ifconfig %s %s up" % (vlan_if, vlan_ip))
        else:
            process.system("ifconfig %s %s up" % (vlan_if, vlan_ip))

    def set_mac_vlan(vlan_if, mac_str, session):
        """
        Give a new mac address for vlan interface in guest.

        :params: vlan_if: Vlan interface.
        :params: mac_str: New mac address for vlan.
        :params: session: VM session.
        """
        mac_cmd = "ip link set %s add %s up" % (vlan_if, mac_str)
        error_context.context("Give a new mac address '%s' for vlan interface "
                              "'%s'" % (mac_str, vlan_if), logging.info)
        session.cmd(mac_cmd)

    def set_arp_ignore(session):
        """
        Enable arp_ignore for all ipv4 device in guest
        """
        error_context.context("Enable arp_ignore for all ipv4 device in guest",
                              logging.info)
        ignore_cmd = "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore"
        session.cmd(ignore_cmd)

    def ping_vlan(vm, dest, vlan_if, session):
        """
        Test ping between vlans, from guest to host/guest.

        :params vm: VM object
        :params dest: Dest ip to ping.
        :params vlan_if: Vlan interface.
        :params session: VM session.
        """
        error_context.context("Test ping from '%s' to '%s' on guest '%s'" %
                              (vlan_if, dest, vm.name))
        status, output = utils_test.ping(dest=dest, count=10,
                                         interface=vlan_if,
                                         session=session, timeout=30)
        if status:
            raise NetPingError(vlan_if, dest, output)

    def netperf_vlan(client='main_vm', server='localhost',
                     sub_type='netperf_stress'):
        """
        Test netperf stress among guests and host.

        :params client: Netperf client.
        :params server: Netperf server.
        :params sub_type: Sub_type to run.
        """
        params["netperf_client"] = client
        params["netperf_server"] = server
        error_context.context("Run netperf stress test among guests and host, "
                              "server: %s, client: %s" % (server, client),
                              logging.info)
        utils_test.run_virt_sub_test(test, params, env, sub_type)

    vms = []
    sessions = []
    ifname = []
    vm_ip = []
    vm_vlan_ip = []
    vm_vlan_if = []
    sub_type = params["sub_type"]
    host_br = params.get("host_br", "switch")
    host_vlan_id = params.get("host_vlan_id", "10")
    host_vlan_ip = params.get("host_vlan_ip", "192.168.10.10")
    subnet = params.get("subnet", "192.168")
    mac_str = params.get("mac_str").split(',')
    os_type = params.get("os_type", "linux")

    linux_modules.load_module("8021q")
    host_vlan_if = "%s.%s" % (host_br, host_vlan_id)
    if host_vlan_if not in utils_net.get_net_if():
        host_vlan_if = add_vlan(interface=host_br, v_id=host_vlan_id)
    if host_vlan_if in utils_net.get_net_if():
        set_ip_vlan(vlan_if=host_vlan_if, vlan_ip=host_vlan_ip)
        rm_host_vlan_cmd = params["rm_host_vlan_cmd"] % host_vlan_if
        funcatexit.register(env, params["type"], _system, rm_host_vlan_cmd)
    else:
        test.cancel("Fail to set up vlan over bridge interface in host!")

    if params.get("start_vm", "yes") == "no":
        vm_main = env.get_vm(params["main_vm"])
        vm_main.create(params=params)
        vm2 = env.get_vm("vm2")
        vm2.create(params=params)
        vms.append(vm_main)
        vms.append(vm2)
    else:
        vms.append(env.get_vm([params["main_vm"]]))
        vms.append(env.get_vm('vm2'))
    for vm_ in vms:
        vm_.verify_alive()

    for vm_index, vm in enumerate(vms):
        error_context.context("Prepare test env on %s" % vm.name)
        session = vm.wait_for_serial_login()
        if not session:
            err_msg = "Could not log into guest %s" % vm.name
            test.error(err_msg)
        if os_type == "linux":
            interface = utils_net.get_linux_ifname(session,
                                                   vm.get_mac_address())
            error_context.context("Load 8021q module in guest %s" % vm.name,
                                  logging.info)
            session.cmd_output_safe("modprobe 8021q")
            error_context.context("Setup vlan environment in guest %s" %
                                  vm.name, logging.info)
            inter_ip = "%s.%s.%d" % (subnet, host_vlan_id, vm_index + 1)
            set_ip_vlan(interface, inter_ip, session=session)
            set_arp_ignore(session)
            error_context.context("Test ping from guest '%s' to host with "
                                  "interface '%s'" % (vm.name, interface),
                                  logging.info)
            try:
                ping_vlan(vm, dest=host_vlan_ip, vlan_if=interface,
                          session=session)
            except NetPingError:
                logging.info("Guest ping fail to host as expected with "
                             "interface '%s'" % interface)
            else:
                test.fail("Guest ping to host should fail with interface"
                          " '%s'" % interface)
            ifname.append(interface)
            vm_ip.append(inter_ip)
            sessions.append(session)

    # Ping succeed between guests
    error_context.context("Test ping between guests with interface %s" %
                          ifname[0], logging.info)
    ping_vlan(vms[0], dest=vm_ip[1], vlan_if=ifname[0], session=sessions[0])

    # set vlan tag for guest
    for vm_index, vm in enumerate(vms):
        session = sessions[vm_index]
        error_context.context("Add vlan interface on guest '%s'" % vm.name)
        session.cmd("ifconfig %s 0.0.0.0" % ifname[vm_index])
        vlan_if = add_vlan(interface=ifname[vm_index], v_id=host_vlan_id,
                           session=session)
        vm_vlan_if.append(vlan_if)
        set_mac_vlan(vlan_if, mac_str[vm_index], session=session)
        vlan_ip = "%s.%s.%d" % (subnet, host_vlan_id, vm_index + 11)
        set_ip_vlan(vlan_if, vlan_ip, session=session)
        vm_vlan_ip.append(vlan_ip)
        error_context.context("Test ping from interface '%s' on guest "
                              "'%s' to host." %
                              (vm_vlan_if[vm_index], vm.name), logging.info)
        ping_vlan(vm, dest=host_vlan_ip, vlan_if=vm_vlan_if[vm_index],
                  session=session)
        netperf_vlan(client=vm.name, server="localhost")

    error_context.context("Test ping and netperf between guests with "
                          "interface '%s'" % vm_vlan_if[vm_index],
                          logging.info)
    ping_vlan(vms[0], dest=vm_vlan_ip[1], vlan_if=vm_vlan_if[0],
              session=sessions[0])
    netperf_vlan(client=params["main_vm"], server='vm2')

    exithandlers = "exithandlers__%s" % sub_type
    sub_exit_timeout = int(params.get("sub_exit_timeout", 10))
    start_time = time.time()
    end_time = start_time + float(sub_exit_timeout)
    while time.time() < end_time:
        logging.debug("%s (%f secs)", sub_type + " is running",
                      (time.time() - start_time))
        if env.data.get(exithandlers):
            break
        time.sleep(1)

    for sess in sessions:
        if sess:
            sess.close()
def bond_setup(self, arg1, arg2):
    '''
    bond setup
    '''
    if arg1 == "local":
        self.log.info("Configuring Bonding on Local machine")
        self.log.info("--------------------------------------")
        for ifs in self.host_interfaces:
            cmd = "ip addr flush dev %s" % ifs
            process.system(cmd, shell=True, ignore_status=True)
        for ifs in self.host_interfaces:
            cmd = "ip link set %s down" % ifs
            process.system(cmd, shell=True, ignore_status=True)
        linux_modules.load_module("bonding")
        genio.write_file(self.bonding_masters_file, "+%s" % self.bond_name)
        genio.write_file("%s/bonding/mode" % self.bond_dir, arg2)
        genio.write_file("%s/bonding/miimon" % self.bond_dir, self.miimon)
        genio.write_file("%s/bonding/fail_over_mac" % self.bond_dir,
                         self.fail_over_mac)
        genio.write_file("%s/bonding/downdelay" % self.bond_dir,
                         self.downdelay)
        dict = {
            '0': ['packets_per_slave', 'resend_igmp'],
            '1': ['num_unsol_na', 'primary', 'primary_reselect',
                  'resend_igmp'],
            '2': ['xmit_hash_policy'],
            '4': ['lacp_rate', 'xmit_hash_policy'],
            '5': ['tlb_dynamic_lb', 'primary', 'primary_reselect',
                  'resend_igmp', 'xmit_hash_policy', 'lp_interval'],
            '6': ['primary', 'primary_reselect', 'resend_igmp',
                  'lp_interval']
        }
        if self.mode in dict.keys():
            for param in dict[self.mode]:
                param_value = self.params.get(param, default='')
                if param_value:
                    genio.write_file("%s/bonding/%s" %
                                     (self.bond_dir, param), param_value)
        for val in self.host_interfaces:
            if self.ib:
                self.bond_ib_conf(self.bond_name, val, "ATTACH")
            else:
                genio.write_file(self.bonding_slave_file, "+%s" % val)
            time.sleep(2)
        bond_name_val = ''
        for line in genio.read_file(self.bond_status).splitlines():
            if 'Bonding Mode' in line:
                bond_name_val = line.split(':')[1]
        self.log.info("Trying bond mode %s [ %s ]", arg2, bond_name_val)
        for ifs in self.host_interfaces:
            cmd = "ip link set %s up" % ifs
            if process.system(cmd, shell=True, ignore_status=True) != 0:
                self.fail("unable to bring interface up")
        cmd = "ip addr add %s/%s dev %s;ip link set %s up"\
            % (self.local_ip, self.net_mask[0],
               self.bond_name, self.bond_name)
        process.system(cmd, shell=True, ignore_status=True)
        for _ in range(0, 600, 60):
            if 'state UP' in process.system_output(
                    "ip link show %s" % self.bond_name,
                    shell=True).decode("utf-8"):
                self.log.info("Bonding setup is successful on"
                              " local machine")
                break
            time.sleep(60)
        else:
            self.fail("Bonding setup on local machine has failed")
        if self.gateway:
            cmd = 'ip route add default via %s dev %s' % \
                (self.gateway, self.bond_name)
            process.system(cmd, shell=True, ignore_status=True)
    else:
        self.log.info("Configuring Bonding on Peer machine")
        self.log.info("------------------------------------------")
        cmd = ''
        for val in self.peer_interfaces:
            cmd += 'ip addr flush dev %s;' % val
        for val in self.peer_interfaces:
            cmd += 'ip link set %s down;' % val
        cmd += 'modprobe bonding;'
        cmd += 'echo +%s > %s;' % (self.bond_name, self.bonding_masters_file)
        cmd += 'echo 0 > %s/bonding/mode;' % self.bond_dir
        cmd += 'echo 100 > %s/bonding/miimon;' % self.bond_dir
        cmd += 'echo 2 > %s/bonding/fail_over_mac;' % self.bond_dir
        for val in self.peer_interfaces:
            if self.ib:
                self.bond_ib_conf(self.bond_name, val, "ATTACH")
            else:
                cmd += 'echo "+%s" > %s;' % (val, self.bonding_slave_file)
        for val in self.peer_interfaces:
            cmd += 'ip link set %s up;' % val
        cmd += 'ip addr add %s/%s dev %s;ip link set %s up;sleep 5;'\
            % (self.peer_first_ipinterface, self.net_mask[0],
               self.bond_name, self.bond_name)
        output = self.session.cmd(cmd)
        if not output.exit_status == 0:
            self.fail("bond setup command failed in peer machine")
def run(test, params, env):
    """
    Test 802.1Q vlan of NIC among guests and host with linux bridge backend.

    1) Configure vlan interface over host bridge interface.
    2) Create two VMs over vlan interface.
    3) Load 8021q module in guest.
    4) Configure ip address of guest with 192.168.*.*
    5) Test by ping between guest and host, should fail.
    6) Test by ping between guests, should pass.
    7) Setup vlan in guests and using hard-coded ip address 192.168.*.*
    8) Test by ping between guest and host, should pass.
    9) Test by ping among guests, should pass.
    10) Test by netperf between guests and host.
    11) Test by netperf between guests.
    12) Delete vlan interface in host.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def add_vlan(interface, v_id, session=None):
        """
        Create a vlan-device on interface.

        :params interface: Interface.
        :params v_id: Vlan id.
        :params session: VM session or none.
        """
        vlan_if = '%s.%s' % (interface, v_id)
        add_cmd = params["add_vlan_cmd"] % (interface, vlan_if, v_id)
        error_context.context("Create vlan interface '%s' on %s" %
                              (vlan_if, interface), logging.info)
        if session:
            session.cmd(add_cmd)
        else:
            process.system(add_cmd)
        return vlan_if

    def set_ip_vlan(vlan_if, vlan_ip, session=None):
        """
        Set ip address of vlan interface.

        :params vlan_if: Vlan interface.
        :params vlan_ip: Vlan internal ip.
        :params session: VM session or none.
        """
        error_context.context("Assign IP '%s' to vlan interface '%s'" %
                              (vlan_ip, vlan_if), logging.info)
        if session:
            session.cmd("ifconfig %s 0.0.0.0" % vlan_if)
            session.cmd("ifconfig %s down" % vlan_if)
            session.cmd("ifconfig %s %s up" % (vlan_if, vlan_ip))
        else:
            process.system("ifconfig %s %s up" % (vlan_if, vlan_ip))

    def set_mac_vlan(vlan_if, mac_str, session):
        """
        Give a new mac address for vlan interface in guest.

        :params: vlan_if: Vlan interface.
        :params: mac_str: New mac address for vlan.
        :params: session: VM session.
        """
        mac_cmd = "ip link set %s add %s up" % (vlan_if, mac_str)
        error_context.context("Give a new mac address '%s' for vlan interface "
                              "'%s'" % (mac_str, vlan_if), logging.info)
        session.cmd(mac_cmd)

    def set_arp_ignore(session):
        """
        Enable arp_ignore for all ipv4 device in guest
        """
        error_context.context("Enable arp_ignore for all ipv4 device in guest",
                              logging.info)
        ignore_cmd = "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore"
        session.cmd(ignore_cmd)

    def ping_vlan(vm, dest, vlan_if, session):
        """
        Test ping between vlans, from guest to host/guest.

        :params vm: VM object
        :params dest: Dest ip to ping.
        :params vlan_if: Vlan interface.
        :params session: VM session.
        """
        error_context.context("Test ping from '%s' to '%s' on guest '%s'" %
                              (vlan_if, dest, vm.name))
        status, output = utils_test.ping(dest=dest, count=10,
                                         interface=vlan_if,
                                         session=session, timeout=30)
        if status:
            raise NetPingError(vlan_if, dest, output)

    def netperf_vlan(client='main_vm', server='localhost',
                     sub_type='netperf_stress'):
        """
        Test netperf stress among guests and host.

        :params client: Netperf client.
        :params server: Netperf server.
        :params sub_type: Sub_type to run.
        """
        params["netperf_client"] = client
        params["netperf_server"] = server
        error_context.context("Run netperf stress test among guests and host, "
                              "server: %s, client: %s" % (server, client),
                              logging.info)
        utils_test.run_virt_sub_test(test, params, env, sub_type)

    vms = []
    sessions = []
    ifname = []
    vm_ip = []
    vm_vlan_ip = []
    vm_vlan_if = []
    sub_type = params["sub_type"]
    host_br = params.get("netdst", "switch")
    host_vlan_id = params.get("host_vlan_id", "10")
    host_vlan_ip = params.get("host_vlan_ip", "192.168.10.10")
    subnet = params.get("subnet", "192.168")
    mac_str = params.get("mac_str").split(',')

    br_backend = utils_net.find_bridge_manager(host_br)
    if not isinstance(br_backend, utils_net.Bridge):
        test.cancel("Host does not use Linux Bridge")

    linux_modules.load_module("8021q")
    host_vlan_if = "%s.%s" % (host_br, host_vlan_id)
    if host_vlan_if not in utils_net.get_net_if():
        host_vlan_if = add_vlan(interface=host_br, v_id=host_vlan_id)
    if host_vlan_if in utils_net.get_net_if():
        set_ip_vlan(vlan_if=host_vlan_if, vlan_ip=host_vlan_ip)
        rm_host_vlan_cmd = params["rm_host_vlan_cmd"] % host_vlan_if
        funcatexit.register(env, params["type"], _system, rm_host_vlan_cmd)
    else:
        test.cancel("Fail to set up vlan over bridge interface in host!")

    if params.get("start_vm", "yes") == "no":
        vm_main = env.get_vm(params["main_vm"])
        vm_main.create(params=params)
        vm2 = env.get_vm("vm2")
        vm2.create(params=params)
        vms.append(vm_main)
        vms.append(vm2)
    else:
        vms.append(env.get_vm([params["main_vm"]]))
        vms.append(env.get_vm('vm2'))
    for vm_ in vms:
        vm_.verify_alive()

    for vm_index, vm in enumerate(vms):
        error_context.context("Prepare test env on %s" % vm.name)
        session = vm.wait_for_serial_login()
        if not session:
            err_msg = "Could not log into guest %s" % vm.name
            test.error(err_msg)
        interface = utils_net.get_linux_ifname(session, vm.get_mac_address())
        error_context.context("Load 8021q module in guest %s" % vm.name,
                              logging.info)
        session.cmd_output_safe("modprobe 8021q")
        error_context.context("Setup vlan environment in guest %s" % vm.name,
                              logging.info)
        inter_ip = "%s.%s.%d" % (subnet, host_vlan_id, vm_index + 1)
        set_ip_vlan(interface, inter_ip, session=session)
        set_arp_ignore(session)
        params["vlan_nic"] = "%s.%s" % (interface, host_vlan_id)
        error_context.context("Test ping from guest '%s' to host with "
                              "interface '%s'" % (vm.name, interface),
                              logging.info)
        try:
            ping_vlan(vm, dest=host_vlan_ip, vlan_if=interface,
                      session=session)
        except NetPingError:
            logging.info("Guest ping fail to host as expected with "
                         "interface '%s'", interface)
        else:
            test.fail("Guest ping to host should fail with interface"
                      " '%s'" % interface)
        ifname.append(interface)
        vm_ip.append(inter_ip)
        sessions.append(session)

    # Ping succeed between guests
    error_context.context("Test ping between guests with interface %s" %
                          ifname[0], logging.info)
    ping_vlan(vms[0], dest=vm_ip[1], vlan_if=ifname[0], session=sessions[0])

    # set vlan tag for guest
    for vm_index, vm in enumerate(vms):
        session = sessions[vm_index]
        error_context.context("Add vlan interface on guest '%s'" % vm.name)
        session.cmd("ifconfig %s 0.0.0.0" % ifname[vm_index])
        vlan_if = add_vlan(interface=ifname[vm_index], v_id=host_vlan_id,
                           session=session)
        vm_vlan_if.append(vlan_if)
        set_mac_vlan(vlan_if, mac_str[vm_index], session=session)
        vlan_ip = "%s.%s.%d" % (subnet, host_vlan_id, vm_index + 11)
        set_ip_vlan(vlan_if, vlan_ip, session=session)
        vm_vlan_ip.append(vlan_ip)
        error_context.context("Test ping from interface '%s' on guest "
                              "'%s' to host." %
                              (vm_vlan_if[vm_index], vm.name), logging.info)
        ping_vlan(vm, dest=host_vlan_ip, vlan_if=vm_vlan_if[vm_index],
                  session=session)
        netperf_vlan(client=vm.name, server="localhost")

    error_context.context("Test ping and netperf between guests with "
                          "interface '%s'" % vm_vlan_if[vm_index],
                          logging.info)
    ping_vlan(vms[0], dest=vm_vlan_ip[1], vlan_if=vm_vlan_if[0],
              session=sessions[0])
    netperf_vlan(client=params["main_vm"], server='vm2')

    exithandlers = "exithandlers__%s" % sub_type
    sub_exit_timeout = int(params.get("sub_exit_timeout", 10))
    start_time = time.time()
    end_time = start_time + float(sub_exit_timeout)
    while time.time() < end_time:
        logging.debug("%s (%f secs)", sub_type + " is running",
                      (time.time() - start_time))
        if env.data.get(exithandlers):
            break
        time.sleep(1)

    for sess in sessions:
        if sess:
            sess.close()
def bond_setup(self, arg1, arg2):
    '''
    bond setup
    '''
    if arg1 == "local":
        self.log.info("Configuring Bonding on Local machine")
        self.log.info("--------------------------------------")
        for ifs in self.host_interfaces:
            cmd = "ip addr flush dev %s" % ifs
            process.system(cmd, shell=True, ignore_status=True)
        for ifs in self.host_interfaces:
            cmd = "ip link set %s down" % ifs
            process.system(cmd, shell=True, ignore_status=True)
        linux_modules.load_module("bonding")
        genio.write_file(self.bonding_masters_file, "+%s" % self.bond_name)
        genio.write_file("%s/bonding/mode" % self.bond_dir, arg2)
        genio.write_file("%s/bonding/miimon" % self.bond_dir, "100")
        genio.write_file("%s/bonding/fail_over_mac" % self.bond_dir, "2")
        for val in self.host_interfaces:
            genio.write_file(self.bonding_slave_file, "+%s" % val)
            time.sleep(2)
        bond_name_val = ''
        for line in genio.read_file(self.bond_status).splitlines():
            if 'Bonding Mode' in line:
                bond_name_val = line.split(':')[1]
        self.log.info("Trying bond mode %s [ %s ]", arg2, bond_name_val)
        for ifs in self.host_interfaces:
            cmd = "ip link set %s up" % ifs
            if process.system(cmd, shell=True, ignore_status=True) != 0:
                self.fail("unable to bring interface up")
        cmd = "ip addr add %s/%s dev %s;ip link set %s up"\
            % (self.local_ip, self.net_mask[0],
               self.bond_name, self.bond_name)
        process.system(cmd, shell=True, ignore_status=True)
        for _ in range(0, 600, 60):
            if 'state UP' in process.system_output(
                    "ip link show %s" % self.bond_name, shell=True):
                self.log.info("Bonding setup is successful on"
                              " local machine")
                break
            time.sleep(60)
        else:
            self.fail("Bonding setup on local machine has failed")
        if self.gateway:
            cmd = 'ip route add default via %s dev %s' % \
                (self.gateway, self.bond_name)
            process.system(cmd, shell=True, ignore_status=True)
    else:
        self.log.info("Configuring Bonding on Peer machine")
        self.log.info("------------------------------------------")
        cmd = ''
        for val in self.peer_interfaces:
            cmd += 'ip addr flush dev %s;' % val
        for val in self.peer_interfaces:
            cmd += 'ip link set %s down;' % val
        cmd += 'modprobe bonding;'
        cmd += 'echo +%s > %s;' % (self.bond_name, self.bonding_masters_file)
        cmd += 'echo 0 > %s/bonding/mode;' % self.bond_dir
        cmd += 'echo 100 > %s/bonding/miimon;' % self.bond_dir
        cmd += 'echo 2 > %s/bonding/fail_over_mac;' % self.bond_dir
        for val in self.peer_interfaces:
            cmd += 'echo "+%s" > %s;' % (val, self.bonding_slave_file)
        for val in self.peer_interfaces:
            cmd += 'ip link set %s up;' % val
        cmd += 'ip addr add %s/%s dev %s;ip link set %s up;sleep 5;'\
            % (self.peer_first_ipinterface, self.net_mask[0],
               self.bond_name, self.bond_name)
        peer_cmd = "timeout %s ssh %s@%s \"%s\"" \
            % (self.peer_wait_time, self.user,
               self.peer_first_ipinterface, cmd)
        if process.system(peer_cmd, shell=True, ignore_status=True) != 0:
            self.fail("bond setup command failed in peer machine")