def panic(self):
    cmd = 'echo 1 > /proc/sys/kernel/sysrq; echo c | tee /proc/sysrq-trigger'
    session = test_utils.get_host_session(self.params, 'instance', self.vm_ip)
    self.injection_time = time.time()
    session.run(cmd, timeout=20, ignore_status=True)
    self.log.info('Run cmd %s on %s successfully' % (cmd, self.vm_ip))
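# A minimal standalone sketch of the same sysrq crash injection, for clarity.
# `run_cmd` is a hypothetical stand-in for session.run(); the test above uses
# a session from test_utils.get_host_session().
def inject_kernel_panic(run_cmd):
    # Writing 1 to /proc/sys/kernel/sysrq enables all sysrq functions;
    # writing 'c' to /proc/sysrq-trigger crashes the kernel immediately,
    # so the command never returns cleanly -- hence the short timeout and
    # ignore_status=True in the caller.
    run_cmd('echo 1 > /proc/sys/kernel/sysrq')
    run_cmd('echo c | tee /proc/sysrq-trigger')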
def record_cpu_info(self, prime_log_path='/tmp/prime.log', host=None,
                    controller_ip=None, controller_password=None):
    """
    Record CPU status on the target node with sar and copy the log back.

    :param prime_log_path: path of the log file recording cpu status
    :param host: the node host where the instance is located
    :param controller_ip: ip of the controller node
    :param controller_password: ssh password of the controller node
    """
    self.log.info('Prepare to record cpu status.')
    session = test_utils.get_host_session(self.params, 'controller')
    ssh_cmd = "ssh -q %s 'yum install sysstat -y && sar -P ALL 1 20 > %s'" \
              % (host, prime_log_path)
    session.run(ssh_cmd)
    session.run('scp %s:%s %s' % (host, prime_log_path, prime_log_path))
    remote_package = remote.Remote_Package(address=controller_ip,
                                           client='ssh',
                                           username="******",
                                           password=controller_password,
                                           port='22',
                                           remote_path=prime_log_path)
    remote_package.pull_file(local_path=prime_log_path)
    session.run('rm -rf %s' % prime_log_path)
    self.log.info("Copy prime.log back to local.")
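# A rough, illustrative sketch of reducing the collected sar log to average
# per-CPU busy percentages. It assumes the default sysstat text layout
# (sample rows ending with the %idle column); locale and sysstat version
# differences may break it, so treat it as a sketch, not part of the test.
def average_cpu_busy(prime_log_path='/tmp/prime.log'):
    busy = {}
    with open(prime_log_path) as f:
        for line in f:
            fields = line.split()
            # skip blanks, header rows and the trailing "Average:" block
            if len(fields) < 8 or '%idle' in line or \
                    line.startswith('Average'):
                continue
            try:
                idle = float(fields[-1])
            except ValueError:
                continue
            # the CPU id sits just before the six percentage columns
            cpu = fields[-7]
            busy.setdefault(cpu, []).append(100.0 - idle)
    return dict((cpu, sum(v) / len(v)) for cpu, v in busy.items())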
def _create_vm_and_cpuset(self, cpu_type, check_cpu_key, host=None):
    if cpu_type == "dedicated":
        extra_spec = {"hw:cpu_policy": cpu_type,
                      "hw:cpu_thread_policy": "isolate"}
    elif cpu_type == "shared":
        extra_spec = {"hw:cpu_policy": cpu_type}
    else:
        raise exceptions.TestFail("cpu_policy: %s is NOT expected."
                                  % cpu_type)
    # Create the instance with cpu_policy and cpu_thread_policy
    if not host:
        self.log.info('Create first vm in one of the nodes.')
        vm_name = 'cloudtest_CPU_' + utils_misc.generate_random_string(6)
        self.flavor = self.compute_utils.create_flavor(
            ram=self.ram, vcpus=self.vcpus, disk=self.disk,
            extra_spec=extra_spec)
        instance = test_utils.create_vm_with_cpu_pinning_and_wait_for_login(
            self.params, vm_name, injected_key=self.pub_key, ram=self.ram,
            vcpus=self.vcpus, disk=self.disk, flavor_name=self.flavor.name,
            **extra_spec)
        host = self.compute_utils.get_server_host(vm_name).split('.')[0]
    else:
        self.log.info(
            'Create second vm in the node where the first vm is located.')
        self.flavor = self.compute_utils.create_flavor(
            extra_spec=extra_spec)
        node = host + '.domain.tld'
        vm_name = self.compute_utils.create_vm_on_specific_node(
            node_name=node, flavor_name=self.flavor.name)
        host = u'%s' % host
        instance = self.compute_utils.find_vm_from_name(vm_name)
        if not self.compute_utils.wait_for_vm_active(instance):
            raise exceptions.TestFail("Failed to build VM: %s" % instance)
    # Get the instance domain name via its VM name
    self.vm_list.append(instance)
    instance_name = self.compute_utils.get_vm_domain_name(vm_name)
    self.log.info("Instance name is %s" % instance_name)
    self.log.info("Host is %s" % host)
    self.controller_session = test_utils.get_host_session(self.params,
                                                          'controller')
    if check_cpu_key == "cpu_policy":
        # Check the instance cpuset against its cpu_policy
        state, cpu_list = test_utils.check_cpuset(
            instance_name, host, cpu_type, self.controller_session)
        if state:
            self.isolate_cpu_list = cpu_list
            self.log.info("%s: cpu_policy is %s, cpusets are expected."
                          % (vm_name, cpu_type))
    elif check_cpu_key == "cpu_thread_policy":
        # Check that the vCPUs are isolated from other instances
        if test_utils.check_vm_cpu_isolation(host, self.controller_session):
            self.log.info("%s: cpu_policy is %s, cpusets are expected."
                          % (vm_name, cpu_type))
    return instance, str(host)
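# A hypothetical sketch of the kind of check delegated to
# test_utils.check_cpuset() above (its real implementation is not shown):
# read the vCPU affinity of the libvirt domain on the compute node and treat
# the VM as "dedicated" only if every vCPU is pinned to a single pCPU.
def verify_dedicated_pinning(controller_session, instance_name, host):
    out = controller_session.run(
        "ssh -q %s 'virsh vcpupin %s'" % (host, instance_name)).stdout
    cpu_list = []
    for line in out.splitlines():
        fields = line.split()
        # data rows pair a vCPU id with its affinity, e.g. "0: 12" or "1 0-3"
        if len(fields) == 2 and fields[0].rstrip(':').isdigit():
            if '-' in fields[1] or ',' in fields[1]:
                return False, []  # floating affinity, not a dedicated pin
            cpu_list.append(int(fields[1]))
    return bool(cpu_list), cpu_list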
def setup(self):
    self.service_action = self.params.get('action', 'restart')
    self.service_name, master_host_ip = \
        test_utils.get_ha_protect_service_info(
            self.controller_ip, self.controller_password,
            self.ha_config_path, master=True)
    self.master_session = test_utils.get_host_session(
        self.params, 'controller', master_host_ip)
    self.service_name, slave_host_ip = \
        test_utils.get_ha_protect_service_info(
            self.controller_ip, self.controller_password,
            self.ha_config_path, master=False)
    # The slave session must target the slave host, not the master
    self.slave_session = test_utils.get_host_session(
        self.params, 'controller', slave_host_ip)
    if self.params.get('service_name') is not None:
        self.service_name = self.params.get('service_name')
def __wait_for_vm_responsive(self, vm_name, vm_fip):
    login_benchmark = int(self.params.get('vm_creation_benchmark', '360'))
    cmd = self.params.get('test_vm_responsive_cmd', 'hostname')
    session = test_utils.get_host_session(self.params, 'instance', vm_fip)
    expected_result = None
    if cmd == 'hostname':
        expected_result = vm_name
    elif cmd == 'whoami':
        expected_result = self.image_username
    return test_utils.wait_for_cmd_execution_within_vm(
        session, cmd, expected_result, login_benchmark)
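# A minimal sketch of the polling contract assumed for
# test_utils.wait_for_cmd_execution_within_vm (the real helper may differ):
# re-run `cmd` inside the VM until its stdout contains `expected_result`,
# giving up once the login benchmark elapses.
def wait_for_cmd_result(session, cmd, expected_result, timeout, interval=5):
    end_time = time.time() + timeout
    while time.time() < end_time:
        result = session.run(cmd, ignore_status=True)
        if expected_result is None or expected_result in result.stdout:
            return True
        time.sleep(interval)
    return False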
def __execute_cmd_within_vm(self, vm, cmd, stdout=''):
    vm_ip = self.compute_utils.assign_floating_ip_to_vm(vm)
    self.log.info("Created VM '%s', try to login via %s" % (vm.name, vm_ip))
    session = test_utils.get_host_session(self.params, 'instance', vm_ip)
    status = self.__wait_for_vm_responsive(session, cmd, stdout)
    if status:
        self.log.info('Run cmd %s on %s successfully!' % (cmd, vm_ip))
    else:
        raise exceptions.TestFail(
            "Failed to execute cmd %s within vm %s!" % (cmd, vm_ip))
def setup(self):
    self.ipmi_ip = self.params.get('ipmi_ip')
    self.ipmi_user = self.params.get('ipmi_user')
    self.ipmi_passwd = self.params.get('ipmi_passwd')
    self.fault_type = self.params.get('fault_type')
    self.ping_recovery_time = self.params.get("ping_recovery_time")
    self.status_recovery_time = int(
        self.params.get("status_recovery_time"))
    self.controller_session = test_utils.get_host_session(
        self.params, 'controller')
def get_prime95_name(self, node_ip=None):
    session = test_utils.get_host_session(self.params, 'controller')
    ssh_cmd = "ssh %s " % node_ip
    cmd = "'getconf LONG_BIT'"
    process_info = session.run(ssh_cmd + cmd).stdout
    if '64' in process_info:
        prime_name = 'prime95.linux64.tar.gz'
    else:
        prime_name = 'prime95.linux32.tar.gz'
    self.log.info('Prime name is: %s' % prime_name)
    return prime_name
def wait_for_ping(self):
    """
    Wait until vm_for_panic can be pinged from vm_for_ping.
    """
    cmd = 'ping -c %s -i 1 %s' % (self.ping_recovery_time,
                                  self.ip_for_panic)
    time_out = int(self.ping_recovery_time) + 10
    session = test_utils.get_host_session(self.params, 'instance',
                                          self.ip_for_ping)
    self.ping_msg = session.run(cmd, timeout=time_out, ignore_status=True)
    self.log.info('Run cmd %s on %s successfully' % (cmd, self.ip_for_ping))
    self.log.debug(self.ping_msg)
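# The recovery decision can be made from ping's summary line
# ("20 packets transmitted, 18 received, 10% packet loss, ..."); a small
# sketch assuming GNU ping output, not part of the test above:
def ping_loss_percent(ping_stdout):
    for line in ping_stdout.splitlines():
        if 'packet loss' in line:
            for token in line.split(','):
                if 'packet loss' in token:
                    return float(token.strip().split('%')[0])
    return 100.0  # no summary found, assume total loss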
def __enable_DPDK(self):
    """
    Enable DPDK on vm_dpdk.
    """
    self.log.info('Begin to enable DPDK......')
    session = test_utils.get_host_session(self.params, 'instance',
                                          self.vm_ip_dpdk)
    self.session_list.append(session)
    cmd = "sh %s/%s %s" % (self.dstpath, self.shell_dpdk,
                           os.path.join(self.dstpath, self.dpdk_version))
    self.log.info("run cmd: %s" % cmd)
    result = session.run(command=cmd, timeout=2000, ignore_status=True)
    self.dpdk_msg = result.stdout
    self.log.info(self.dpdk_msg)
def __configure_pktgen(self):
    """
    Use the Linux pktgen scripts on vm_pktgen to send traffic.
    """
    self.log.info('Begin to configure pktgen......')
    session = test_utils.get_host_session(self.params, 'instance',
                                          self.vm_ip_pktgen)
    self.session_list.append(session)
    cmd_setup = "sh %s/%s %s %s" % (self.dstpath, self.setup_pktgen,
                                    self.eth2_macaddr, self.eth1_macaddr)
    cmd_start_pktgen = "sh %s/%s" % (self.dstpath, self.start_pktgen)
    session.run(command=cmd_setup, timeout=20)
    session.run(command=cmd_start_pktgen, timeout=2000)
def test_vm_operation_with_numa_node_pinning(self):
    self.controller_session = test_utils.get_host_session(
        self.params, 'controller')
    if self.controller_session is None:
        raise exceptions.TestFail("Failed to log in to controller with "
                                  "ip %s." % self.controller_ip)
    # Get the NUMA node count on the compute node
    numa_count = test_utils.get_numa_count(self.compute_ip,
                                           self.controller_session)
    self.log.info("Create VM with NUMA pinning")
    self._create_vm_and_cpus(numa_count)
    # Verify NUMA pinning
    test_utils.check_numa_pinning(self.instance_name, self.compute_ip,
                                  numa_count, self.controller_session)
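# A hypothetical sketch of how the NUMA node count could be read on the
# compute node (test_utils.get_numa_count's implementation is not shown):
# `numactl --hardware` starts with a line such as "available: 2 nodes (0-1)".
def get_numa_count_via_numactl(controller_session, compute_ip):
    out = controller_session.run(
        "ssh -q %s 'numactl --hardware'" % compute_ip).stdout
    for line in out.splitlines():
        if line.startswith('available:'):
            return int(line.split()[1])
    raise ValueError('unexpected numactl output')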
def __wait_for_vm_responsive(self, vm):
    login_benchmark = int(self.params.get('vm_creation_benchmark', '360'))
    cmd = self.params.get('test_vm_responsive_cmd', 'hostname')
    vm_fip = self.compute_utils.assign_floating_ip_to_vm(vm)
    session = test_utils.get_host_session(self.params, 'instance', vm_fip)
    expected_result = None
    if cmd == 'hostname':
        expected_result = vm.name
    elif cmd == 'whoami':
        expected_result = self.image_username
    status = test_utils.wait_for_cmd_execution_within_vm(
        session, cmd, expected_result, login_benchmark)
    if not status:
        self.compute_utils.capture_vm_console_log(vm_fip)
        raise exceptions.TestFail(
            "Exception happened while executing cmd within vm: %s" % vm.name)
def setup(self):
    self.vm_1_name = 'cloudtest_' + utils_misc.generate_random_string(6)
    self.vm_2_name = 'cloudtest_' + utils_misc.generate_random_string(6)
    LOG.info("Try to get two compute nodes")
    hyors = self.compute_utils.novaclient.hypervisors.list()
    if len(hyors) < 2:
        raise exceptions.TestSetupFail("Failed to get enough compute nodes")
    hyors_index = self.get_randomindex(len(hyors), 2)
    LOG.info("Try to get compute node ip.")
    computenode_ip = self.get_computenode_ip(
        hyors[hyors_index[0]]._info["service"]["host"])
    LOG.info("Got compute node ip: %s" % computenode_ip)
    self.session_computenode = self.get_session_computenode(computenode_ip,
                                                            usekey=True)
    LOG.info("To check if it supports nic bonding")
    self.nicbonding = self.get_nic_bonding(self.session_computenode)
    if self.nicbonding is None:
        raise exceptions.TestSkipError("Did not find bonding nic, "
                                       "skip the test")
    LOG.info("Got a bonding nic %s" % self.nicbonding)
    self.vm1 = self.create_vm_with_az(
        self.vm_1_name, hyors[hyors_index[0]]._info["service"]["host"])
    self.register_cleanup(self.vm1)
    self.vm2 = self.create_vm_with_az(
        self.vm_2_name, hyors[hyors_index[1]]._info["service"]["host"])
    self.register_cleanup(self.vm2)
    self.compute_utils.assign_floating_ip_to_vm(self.vm1)
    self.compute_utils.assign_floating_ip_to_vm(self.vm2)
    self.ipaddr_1 = self.compute_utils.get_vm_ipaddr(self.vm_1_name)
    self.ipaddr_2 = self.compute_utils.get_vm_ipaddr(self.vm_2_name)
    time.sleep(10)
    self.session_vm = test_utils.get_host_session(
        self.params, 'instance', self.ipaddr_1["floating"])
    checkpath = "/etc/sysconfig/network-scripts"
    self.nics = self.get_eths_forbonded(self.session_computenode,
                                        checkpath, self.nicbonding)
    if len(self.nics) == 0:
        raise exceptions.TestSetupFail("Failed to get bonded nic")
    LOG.info("%s bonded to be %s" % (self.nics, self.nicbonding))
def test_vm_max_count(self):
    controller_session = test_utils.get_host_session(self.params,
                                                     'controller')
    vm_domain_name_list = []
    extra_spec = {"hw:cpu_policy": "dedicated"}
    flavor = self.compute_utils.create_flavor(ram=self.ram,
                                              vcpus=self.vcpus,
                                              disk=self.disk,
                                              extra_spec=extra_spec)
    self.register_cleanup(flavor)
    nodes = self.compute_utils.get_all_hypervisors()
    host_name = nodes[0].hypervisor_hostname
    host_ip = nodes[0].host_ip
    vm_num = 0
    vm_name_str = "cloudtest-" + utils_misc.generate_random_string(6)
    # Keep creating pinned VMs on the node until one fails to go ACTIVE
    while True:
        vm_name = vm_name_str + "-" + str(vm_num)
        vm_num = vm_num + 1
        net = test_utils.get_test_network(self.params)
        self.compute_utils.create_vm_on_specific_node(
            node_name=host_name, flavor_name=flavor.name,
            injected_key=self.pub_key, network_name=net['name'],
            vm_name=vm_name)
        vm = self.compute_utils.find_vm_from_name(vm_name)
        self.register_cleanup(vm)
        status = self.compute_utils.wait_for_vm_active(
            vm, delete_on_failure=False)
        if not status:
            break
        vm_domain_name = self.compute_utils.get_vm_domain_name(vm_name)
        vm_domain_name_list.append(vm_domain_name)
    self.log.info("Can create %s vms on node %s when the cpu dedicated "
                  "policy is set." % (str(vm_num - 1), host_name))
    dpdk_core_list = self._get_dpdk_core(host_ip, controller_session)
    test_utils.check_vm_cpu_isolation(host_ip, controller_session,
                                      dpdk_core_list)
def setup(self):
    self.volume_name = 'cloudtest_' + utils_misc.generate_random_string(6)
    self.volume_id = None
    LOG.info("To get flavor %s" % self.params["flavor_name"])
    self.flavor_detail = self.get_flavor_detail()
    self.flavor = self.get_flavor(self.params["flavor_name"],
                                  int(self.flavor_detail["mem"]),
                                  int(self.flavor_detail["cpu"]),
                                  int(self.flavor_detail["disk"]))
    LOG.info("Flavor %s id is %s" % (self.params["flavor_name"],
                                     self.flavor.id))
    if self.flavor.id is None:
        raise exceptions.TestSetupFail("Failed to get flavor %s"
                                       % self.params["flavor_name"])
    if "create_vm" in self.params:
        self.vmname = 'cloudtest_' + utils_misc.generate_random_string(6)
        LOG.info("Creating a new vm %s now" % self.vmname)
        self.add_to_cfg(self.params["result_file"], "vmname", self.vmname)
        vm = self.compute_utils.create_vm(
            vm_name=self.vmname,
            image_name=self.params["image_name"],
            flavor_name=self.params["flavor_name"],
            network_name=self.params["network_name"],
            injected_key=None, sec_group=None, availability_zone=None)
        vm_created = self.compute_utils.wait_for_vm_active(
            vm, 1, int(self.params["vmtobeactive_timeout"]))
        if not vm_created:
            raise exceptions.TestSetupFail("Creating VM %s timed out"
                                           % self.vmname)
        self.compute_utils.assign_floating_ip_to_vm(vm)
        ipaddr = self.compute_utils.get_vm_ipaddr(self.vmname)
        self.volume_id = self.volume_utils.create_volume(
            self.volume_name, int(self.flavor_detail["disk"]))
        LOG.info("Created a new volume %s" % self.volume_name)
        self.add_to_cfg(self.params["result_file"], "volume_id",
                        self.volume_id)
        time.sleep(5)
        LOG.info("Try to make a session on FIP %s for vm"
                 % ipaddr["floating"])
        try:
            self.session_vm = test_utils.get_host_session(
                self.params, 'instance', ipaddr["floating"])
        except Exception:
            # Fall back to keypair authentication
            self.params["image_ssh_auth_method"] = "keypair"
            self.session_vm = test_utils.get_host_session(
                self.params, 'instance', ipaddr["floating"])
        self.compute_utils.attach_volume(vm.id, self.volume_id)
        time.sleep(10)
        if not self.check_if_vm_has_device(self.session_vm,
                                           self.params["fio_devname_vm"]):
            raise exceptions.TestSetupFail(
                "Failed to prepare new disk for FIO")
        self.prepare_fiodisk(self.session_vm,
                             self.params["fio_devname_vm"],
                             self.params["fio_diskdir_vm"])
    else:
        self.vmname = self.get_cfg_item(self.params["result_file"],
                                        "vmname")
        if self.vmname is None:
            raise exceptions.TestSetupFail("Failed to get vm name")
        LOG.info("Try to get the vm %s" % self.vmname)
        vm = self.compute_utils.find_vm_from_name(self.vmname)
        ipaddr = self.compute_utils.get_vm_ipaddr(self.vmname)
        LOG.info("Try to make a session on FIP %s for vm"
                 % ipaddr["floating"])
        try:
            self.session_vm = test_utils.get_host_session(
                self.params, 'instance', ipaddr["floating"])
        except Exception:
            # Fall back to keypair authentication with the alternate user
            self.params["image_ssh_auth_method"] = "keypair"
            self.params["image_ssh_username"] = self.params[
                "image_ssh_username_alt"]
            try:
                self.session_vm = test_utils.get_host_session(
                    self.params, 'instance', ipaddr["floating"])
            except Exception:
                raise exceptions.TestSetupFail(
                    "Failed to set up a session to VM")
        if not self.check_if_vm_has_device(self.session_vm,
                                           self.params["fio_devname_vm"]):
            raise exceptions.TestSetupFail(
                "Failed to prepare new disk for FIO")
        cmd = "[ -d %s ]" % self.params["fio_diskdir_vm"]
        run_result = self.session_vm.run(cmd)
        if run_result.exit_status != 0:
            raise exceptions.TestSetupFail(
                "Failed to prepare new disk for FIO")
def test_live_migrate_dpdk_enabled(self, is_live_migrate=True):
    self.__set_host_name()
    self.__create_net_with_subnet(count=2)
    self.__create_port()
    cpu_policy_type = "dedicated"
    self.__create_flavor_with_cpu_pinning(cpu_type=cpu_policy_type)
    self.vm_dpdk, self.vm_ip_dpdk = self.__create_vm_with_extra_nic(index=0)
    self.vm_pktgen, self.vm_ip_pktgen = self.__create_vm_with_extra_nic(
        index=1)
    session_dpdk = test_utils.get_host_session(self.params, 'instance',
                                               self.vm_ip_dpdk)
    session_pktgen = test_utils.get_host_session(self.params, 'instance',
                                                 self.vm_ip_pktgen)
    self.session_list.append(session_dpdk)
    self.session_list.append(session_pktgen)
    self.eth1_macaddr, self.eth2_macaddr = \
        self.__get_nic_macaddr(session_pktgen=session_pktgen)
    if is_live_migrate:
        self.__generate_script()
    else:
        self.__generate_script(dpdk_tool_path="usertools")
    # Get the ifconfig recv counters before starting pktgen
    ifconfig_msg_before = self.__get_ifconfig_msg(session=session_pktgen)
    # Received packet counts of eth1 and eth2
    recv_1_before, recv_2_before = \
        self.__analyse_ifconfig_msg(config_msg=ifconfig_msg_before)
    self.__run_multi_thread()
    if is_live_migrate:
        self.__live_migrate()
    # 10s after live migration completes, stop l2fwd (dpdk) and pktgen
    time.sleep(10)
    # Stop the l2fwd and pktgen processes
    cmd_pid = "ps aux | grep %s | awk '{print $2}'" % self.start_pktgen
    process_str = session_pktgen.run(cmd_pid)
    self.log.info('pktgen str %s' % process_str)
    pktgen_pid = process_str.stdout.split()[0]
    session_pktgen.run("kill -9 %s" % pktgen_pid)
    # Get the packets sent on eth1 and eth2 after running pktgen
    send_pkg_1, send_pkg_2 = self.__get_send_pkg(
        session_pktgen=session_pktgen)
    # Allow 10s for the counters to settle after stopping pktgen
    time.sleep(10)
    session_dpdk.run("pkill l2fwd")
    # Allow 5s for l2fwd to print its final statistics
    time.sleep(5)
    ifconfig_msg_after = self.__get_ifconfig_msg(session=session_pktgen)
    # Received packet counts of eth1 and eth2 after live migration
    recv_1_after, recv_2_after = \
        self.__analyse_ifconfig_msg(config_msg=ifconfig_msg_after)
    self.log.info("*********TEST RESULT*********")
    self.log.info("send pkt of pktgen eth1: %s; eth2: %s"
                  % (send_pkg_1, send_pkg_2))
    self.log.info("Using ifconfig eth1 recv pkt: %s; eth2 recv pkt: %s"
                  % (recv_1_after, recv_2_after))
    # Received packet counts as reported by dpdk
    recv_dpdk_1, recv_dpdk_2 = self.__analyse_dpdk_data()
    self.log.info("Using dpdk eth1 recv pkt: %s; eth2 recv pkt: %s"
                  % (recv_dpdk_1, recv_dpdk_2))
    self.log.info("*********END*********")
    if is_live_migrate:
        # Compute the live migration time from the ifconfig counters
        result_ifconfig = self.__compute_result(send_pkg_1=send_pkg_1,
                                                send_pkg_2=send_pkg_2,
                                                recv_pkg_1=recv_1_after,
                                                recv_pkg_2=recv_2_after,
                                                origin_pkg_1=recv_1_before,
                                                origin_pkg_2=recv_2_before)
        self.log.info(
            "VM %s live migration time: %ssec while using ifconfig"
            % (self.vm_dpdk.name, str(result_ifconfig)))
        if result_ifconfig > 0.2:
            raise exceptions.TestFail(
                "VM migration time must be under 0.2sec")
        # Compute the live migration time from the dpdk counters
        result_dpdk = self.__compute_result(send_pkg_1=send_pkg_1,
                                            send_pkg_2=send_pkg_2,
                                            recv_pkg_1=recv_dpdk_1,
                                            recv_pkg_2=recv_dpdk_2)
        self.log.info("VM %s live migration time: %ssec while using DPDK"
                      % (self.vm_dpdk.name, str(result_dpdk)))
        if result_dpdk > 0.2:
            raise exceptions.TestFail(
                "VM migration time must be under 0.2sec")
    else:
        diff1 = send_pkg_1 - recv_1_after
        diff2 = send_pkg_2 - recv_2_after
        self.log.info("Lost %s pkts on eth1" % str(diff1))
        self.log.info("Lost %s pkts on eth2" % str(diff2))
        diff_total = diff1 + diff2
        if diff_total > 0:
            raise exceptions.TestFail("Too many pkts lost when using "
                                      "different version dpdk.")
def __check_vms_responsive(self, timeout):
    start_time = time.time()
    i = 0
    instance_session = None
    instance_fip = None
    while time.time() < (start_time + timeout):
        _vm = self.checking_vm_list[i]
        ret = self.compute_utils.wait_for_vm_in_status(_vm, 'ACTIVE', 3,
                                                       timeout=9)
        if not ret:
            _vm = self.compute_utils.find_vm_from_name(_vm.name)
            if _vm.status == 'ERROR':
                self.log.error("Failed to create vm %s" % _vm.name)
                self.error_vm_list.append(_vm)
                self.checking_vm_list.pop(i)
            else:
                i += 1
        elif instance_session is None:
            if self.params.get('vms_operation', 'creation') == 'soft_reboot':
                vm_ipaddr = self.compute_utils.get_vm_ipaddr(_vm.name)
                self.log.info("vm addr: %s" % vm_ipaddr)
                if vm_ipaddr.get('floating') is None:
                    i += 1
                    continue
                else:
                    instance_fip = vm_ipaddr['floating']
            if instance_fip is None:
                instance_fip = \
                    self.compute_utils.assign_floating_ip_to_vm(_vm)
            instance_session = test_utils.get_host_session(
                self.params, 'instance', instance_fip)
            self.__scp_private_key_to_vm(instance_session, instance_fip)
            vm_nic_mtu = self.params.get('mtu', '1400')
            cmd = 'sudo ifconfig eth0 mtu %s' % vm_nic_mtu
            self.log.info('Changing mtu to %s of VM: %s'
                          % (vm_nic_mtu, self.vm_list[0]))
            instance_session.run(cmd, ignore_status=False)
            self.checking_vm_list.pop(i)
        else:
            if self.params.get('need_ssh_login') == 'yes':
                if self.test_vm_responsive_cmd == 'ping':
                    ret = self.__wait_for_vm_ping_vm(instance_session, _vm)
                else:
                    ret = self.__wait_for_cmd_execution_from_vm(
                        instance_session, _vm)
            if ret:
                self.checking_vm_list.pop(i)
            else:
                i += 1
        if len(self.checking_vm_list) == 0:
            break
        i = i % len(self.checking_vm_list)
    if len(self.error_vm_list) > 0 or len(self.checking_vm_list) > 0:
        for _vm in self.checking_vm_list:
            self.compute_utils.capture_vm_console_log(_vm)
        self.captured_vm_log = True
        raise exceptions.TestFail(
            "Failed to wait for all created vms to become responsive!")
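# The loop above scans checking_vm_list round-robin: pop(i) drops finished
# VMs in place and `i % len(...)` re-wraps the cursor after each pass or
# removal. The same pattern in isolation (the real loop additionally bounds
# the scan by `timeout`):
def drain(items, is_done):
    i = 0
    while items:
        if is_done(items[i]):
            items.pop(i)  # the next candidate slides into index i
        else:
            i += 1
        if items:
            i %= len(items)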
def _test_vm(self):
    fio_device_vm = None
    result_speccpu_vm = None
    result_fio_vm = None
    result_stream_vm = None
    LOG.debug("Start testing on VM")
    LOG.debug("Create VM %s on the same RAID as the LUN" % self.vm_name)
    LOG.debug("VM is booting on %s" % self.availability_zone)
    self.vm = self.compute_utils.create_vm(
        vm_name=self.vm_name,
        image_name=self.params["image_name"],
        flavor_name=self.flavor_name,
        network_name=self.params["network_name"],
        injected_key=None, sec_group=None,
        availability_zone=self.availability_zone)
    vm_created = self.compute_utils.wait_for_vm_active(
        self.vm, 1, self.vmtobeactive_timeout)
    if not vm_created:
        raise exceptions.TestSetupFail("Failed to create VM")
    self.register_cleanup(self.vm)
    self.compute_utils.assign_floating_ip_to_vm(self.vm)
    ipaddr = self.compute_utils.get_vm_ipaddr(self.vm_name)
    LOG.debug("Ip address: %s" % ipaddr)
    try:
        session_vm = test_utils.get_host_session(self.params, 'instance',
                                                 ipaddr["floating"])
    except Exception:
        raise exceptions.TestSetupFail("Failed to get a session to the VM "
                                       "via its floating ip")
    LOG.debug("Creating a new volume")
    self.volume_id = self.volume_utils.create_volume(self.volume_name, 1)
    self.register_cleanup(resource=self.volume_id, res_type='volume')
    LOG.debug("Mount the new volume to VM")
    _devnames_vm_1 = self.get_alldevname_vm(session_vm)
    self.compute_utils.attach_volume(self.vm.id, self.volume_id)
    _devnames_vm_2 = self.get_alldevname_vm(session_vm)
    LOG.debug("Get device name in VM")
    # The new device is the one that appeared after attaching the volume
    fio_device_vm = self.get_devname_vm(_devnames_vm_1, _devnames_vm_2)
    LOG.info("The new device on VM is %s" % fio_device_vm)
    casetype = "vm"
    LOG.info("Run SpecCPU testing on VM")
    exit_status = self.check_speccpu(self.speccpu_workdir_vm, session_vm)
    if exit_status == 0:
        result_speccpu_vm = self.dospeccpu(self.speccpu_workdir_vm,
                                           session_vm, self.speccpu_timeout,
                                           self.times, casetype)
    else:
        LOG.info("SpecCPU is not ready on VM")
    LOG.info("Run Stream testing on VM")
    exit_status = self.check_stream(self.stream_workdir_vm, session_vm)
    if exit_status == 0:
        result_stream_vm = self.dostream(self.stream_workdir_vm, session_vm,
                                         self.stream_timeout, self.times,
                                         casetype)
    else:
        LOG.info("Stream is not ready on VM")
    LOG.info("Run FIO testing on VM")
    exit_status = self.check_fio(self.fio_workdir_vm, session_vm)
    if exit_status == 0:
        if fio_device_vm is not None:
            result_fio_vm = self.dofio(self.fio_workdir_vm, session_vm,
                                       self.fio_timeout, fio_device_vm,
                                       self.times, casetype)
        else:
            LOG.info("Did not get device in VM")
    else:
        LOG.info("FIO is not ready on VM")
    return result_speccpu_vm, result_stream_vm, result_fio_vm