def live_migration_guest(test, params, vm, session):
    """
    Cap the migration bandwidth, then live-migrate the guest.

    :param test: QEMU test object (unused here, kept for the callback API).
    :param params: Dictionary with the test parameters.
    :param vm: VM object to migrate.
    :param session: Guest session (unused here, kept for the callback API).
    """
    speed_limit = params.get("mig_speed", "1G")
    qemu_migration.set_speed(vm, speed_limit)
    vm.migrate()
def run(test, params, env):
    """
    Offline migration with virtio-serial enabled.

    1) Start guest with virtio serial device vs1 & vs2.
    2) Transfer data via vs1 on the source guest.
    3) Offline migration.
    4) Transfer data via vs2 with the destination guest:

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def run_serial_data_transfer():
        """
        Transfer data over every virtio serial port configured for the guest.
        """
        for port in params.objects("serials"):
            port_params = params.object_params(port)
            # Only exercise virtio/virtconsole serial ports; skip others.
            if not port_params['serial_type'].startswith('virt'):
                continue
            params['file_transfer_serial_port'] = port
            transfer_data(params, vm)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    if params["os_type"] == "windows":
        # On Windows, verify the virtio driver is actually running first.
        session = vm.wait_for_login()
        driver_name = params["driver_name"]
        session = utils_test.qemu.windrv_check_running_verifier(
            session, vm, test, driver_name)
        session.close()
    error_context.context("transferring data on source guest", logging.info)
    run_serial_data_transfer()

    mig_protocol = params.get("migration_protocol", "tcp")
    mig_exec_cmd_src = params.get("migration_exec_cmd_src")
    mig_exec_cmd_dst = params.get("migration_exec_cmd_dst")
    mig_exec_file = params.get("migration_exec_file", "/var/tmp/exec")
    # Randomize the exec-file name so concurrent test runs do not collide.
    mig_exec_file += "-%s" % utils_misc.generate_random_string(8)
    mig_exec_cmd_src = mig_exec_cmd_src % mig_exec_file
    mig_exec_cmd_dst = mig_exec_cmd_dst % mig_exec_file
    qemu_migration.set_speed(vm, params.get("mig_speed", "1G"))
    try:
        vm.migrate(protocol=mig_protocol, offline=True,
                   migration_exec_cmd_src=mig_exec_cmd_src,
                   migration_exec_cmd_dst=mig_exec_cmd_dst)
        error_context.context("transferring data on destination guest",
                              logging.info)
        run_serial_data_transfer()
        vm.verify_kernel_crash()
    finally:
        # The exec file may never have been created if migration failed
        # early; guard so cleanup does not raise FileNotFoundError and
        # mask the original test failure.
        if os.path.exists(mig_exec_file):
            os.remove(mig_exec_file)
def post_migration_speed(self, vm, cancel_delay, mig_offline, dsthost,
                         vm_ports, not_wait_for_migration,
                         fd, mig_data):
    """
    Ramp the migration speed up until migration converges.

    Waits for migration at the current speed; on each timeout bumps the
    speed by ``self.speed_step`` (from ``self.min_speed`` towards
    ``self.max_speed``) and retries.  After the ramp, waits one final
    ``self.mig_timeout`` and fails the test if migration still has not
    completed.

    :param vm: VM object being migrated.
    :param mig_data: Migration data (unused here; part of the hook API,
                     as are the other unused parameters).
    :raises error.TestFail: if migration does not finish in time.
    """
    mig_speed = None
    for mig_speed in range(self.min_speed, self.max_speed, self.speed_step):
        try:
            vm.wait_for_migration(self.wait_mig_timeout)
            break
        except virt_vm.VMMigrateTimeoutError:
            # Not converged yet at this speed; raise the limit and retry.
            qemu_migration.set_speed(vm, "%sB" % (mig_speed))
    # Test migration status. If migration is not completed then the test
    # fails (killing the program which creates guest load).
    try:
        vm.wait_for_migration(self.mig_timeout)
    except virt_vm.VMMigrateTimeoutError:
        # Fixed: message previously rendered with a double space
        # ("setting  mig_speed") due to a stray leading space.
        raise error.TestFail("Migration failed with setting "
                             "mig_speed to %sB." % mig_speed)
    logging.debug("Migration passed with mig_speed %sB", mig_speed)
    vm.destroy(gracefully=False)
def test(self):
    # Build a CPU model string that adds flags the model lacks and drops
    # flags the host cannot provide, boot a guest with it, verify the
    # flags work in the guest, then live-migrate under CPU + disk stress.
    # NOTE(review): relies on several names from the enclosing scope
    # (parse_cpu_model, smp, mig_speed, mig_timeout, mig_protocol, test).
    cpu_model, extra_flags = parse_cpu_model()

    flags = HgFlags(cpu_model, extra_flags)

    logging.debug("Cpu mode flags %s.", str(flags.quest_cpu_model_flags))
    logging.debug("Added flags %s.", str(flags.cpumodel_unsupport_flags))
    cpuf_model = cpu_model

    # Add unsupported flags.
    for fadd in flags.cpumodel_unsupport_flags:
        cpuf_model += ",+" + str(fadd)

    # Remove flags the host does not support.
    for fdel in flags.host_unsupported_flags:
        cpuf_model += ",-" + str(fdel)

    (self.vm, _) = start_guest_with_cpuflags(cpuf_model, smp)

    install_path = "/tmp"
    install_cpuflags_test_on_vm(self.vm, install_path)

    # NOTE(review): 'flags' is rebound here from the HgFlags object to
    # the (passed, failed) result of check_cpuflags_work.
    flags = check_cpuflags_work(self.vm, install_path, flags.guest_flags)
    test.assertTrue(flags[0], "No cpuflags passed the check: %s" %
                    str(flags))
    test.assertFalse(flags[1], "Some cpuflags failed the check: %s" %
                     str(flags))

    dd_session = self.vm.wait_for_login()
    stress_session = self.vm.wait_for_login()

    # Background disk load: dd from the first guest disk to a temp file.
    dd_session.sendline("nohup dd if=$(echo /dev/[svh]da) of=/tmp/"
                        "stressblock bs=10MB count=100 &")
    # Background CPU load: stress the flags that passed the check.
    cmd = ("nohup %s/cpuflags-test --stress %s%s &" %
           (os.path.join(install_path, "src"),
            smp,
            cpu.kvm_flags_to_stresstests(flags[0])))
    stress_session.sendline(cmd)

    time.sleep(5)

    qemu_migration.set_speed(self.vm, mig_speed)
    self.clone = self.vm.migrate(mig_timeout, mig_protocol,
                                 offline=False,
                                 not_wait_for_migration=True)

    time.sleep(5)

    try:
        self.vm.wait_for_migration(10)
    except virt_vm.VMMigrateTimeoutError:
        # Migration did not converge under load; shrink the allowed
        # downtime to force convergence, then wait the full timeout.
        qemu_migration.set_downtime(self.vm, 1)
        self.vm.wait_for_migration(mig_timeout)

    self.clone.resume()
    self.vm.destroy(gracefully=False)

    stress_session = self.clone.wait_for_login()

    # If cpuflags-test hung up during migration the test raises exception
    try:
        stress_session.cmd('killall cpuflags-test')
    except aexpect.ShellCmdError:
        test.fail("Stress cpuflags-test should be still "
                  "running after migration.")
    # Confirm the dd load actually produced its output file, then clean up.
    try:
        stress_session.cmd("ls /tmp/stressblock && "
                           "rm -f /tmp/stressblock")
    except aexpect.ShellCmdError:
        test.fail("Background 'dd' command failed to "
                  "produce output file.")
def mig_set_speed(vm, params, test):
    """
    Apply the configured migration speed limit to the VM.

    :param vm: VM object whose migration speed is limited.
    :param params: Dictionary with the test parameters
                   (reads ``mig_speed``, default ``1G``).
    :param test: QEMU test object (unused; part of the callback API).
    :return: whatever ``qemu_migration.set_speed`` returns.
    """
    return qemu_migration.set_speed(vm, params.get("mig_speed", "1G"))
def test(self):
    # Multi-host CD-ROM copy test: the source host starts a background
    # copy from CD-ROM to disk inside the guest, migrates the guest to
    # the destination host, which then waits for the copy to finish and
    # verifies the data with md5.
    # NOTE(review): relies on names from the enclosing scope
    # (params, env, login_timeout, cdrom_prepare_timeout, helpers).
    from autotest.client.shared.syncdata import SyncData
    super(test_multihost_copy, self).test()
    copy_timeout = int(params.get("copy_timeout", 480))
    checksum_timeout = int(params.get("checksum_timeout", 180))
    pid = None
    sync_id = {'src': self.srchost,
               'dst': self.dsthost,
               "type": "file_trasfer"}
    filename = "orig"
    remove_file_cmd = params["remove_file_cmd"] % filename
    dst_file = params["dst_file"] % filename

    if self.is_src:  # Starts in source
        vm = env.get_vm(self.vms[0])
        # Unthrottle migration so the copy, not the link, is the bottleneck.
        qemu_migration.set_speed(vm, "1G")
        session = vm.wait_for_login(timeout=login_timeout)
        cdrom_dev_list = list_guest_cdroms(session)
        logging.debug("cdrom_dev_list: %s", cdrom_dev_list)
        cdrom = get_testing_cdrom_device(vm,
                                         session,
                                         cdrom_dev_list,
                                         self.serial_num)
        mount_point = get_cdrom_mount_point(session, cdrom, params)
        mount_cmd = params["mount_cdrom_cmd"] % (cdrom, mount_point)
        src_file = params["src_file"] % (mount_point, filename)
        copy_file_cmd = params[
            "copy_file_cmd"] % (mount_point, filename)
        if params["os_type"] != "windows":
            error_context.context("Mount and copy data", logging.info)
            session.cmd(mount_cmd, timeout=30)

        error_context.context("File copying test", logging.info)
        session.cmd(remove_file_cmd)
        session.cmd(copy_file_cmd)
        # Kick off the copy in the background; returns the guest-side PID.
        pid = disk_copy(vm, src_file, dst_file, copy_timeout)

    # Share the copy PID with the destination host before migrating.
    sync = SyncData(self.mig.master_id(), self.mig.hostid,
                    self.mig.hosts, sync_id, self.mig.sync_server)

    pid = sync.sync(pid, timeout=cdrom_prepare_timeout)[self.srchost]

    self.mig.migrate_wait([self.vms[0]], self.srchost, self.dsthost)

    if not self.is_src:  # Starts in source
        # NOTE(review): comment kept from original; this branch actually
        # runs on the destination host after migration.
        vm = env.get_vm(self.vms[0])
        session = vm.wait_for_login(timeout=login_timeout)
        error_context.context("Wait for copy finishing.", logging.info)
        cdrom_dev_list = list_guest_cdroms(session)
        cdrom = get_testing_cdrom_device(vm,
                                         session,
                                         cdrom_dev_list,
                                         self.serial_num)
        mount_point = get_cdrom_mount_point(session, cdrom, params)
        mount_cmd = params["mount_cdrom_cmd"] % (cdrom, mount_point)
        src_file = params["src_file"] % (mount_point, filename)
        md5sum_cmd = params["md5sum_cmd"]

        def is_copy_done():
            # The copy is done once the recorded PID is no longer running.
            if params["os_type"] == "windows":
                cmd = "tasklist /FI \"PID eq %s\"" % pid
            else:
                cmd = "ps -p %s" % pid
            return session.cmd_status(cmd) != 0

        if not utils_misc.wait_for(is_copy_done, timeout=copy_timeout):
            test.fail("Wait for file copy finish timeout")

        error_context.context("Compare file on disk and on cdrom",
                              logging.info)
        f1_hash = session.cmd(md5sum_cmd % dst_file,
                              timeout=checksum_timeout).split()[0]
        f2_hash = session.cmd(md5sum_cmd % src_file,
                              timeout=checksum_timeout).split()[0]
        if f1_hash.strip() != f2_hash.strip():
            test.fail("On disk and on cdrom files are"
                      " different, md5 mismatch")
        session.cmd(remove_file_cmd)

    # All hosts rendezvous here before the test ends.
    self.mig._hosts_barrier(self.mig.hosts, self.mig.hosts,
                            'Finish_cdrom_test', login_timeout)
def run(test, params, env):
    """
    KVM migration test:
    1) Get a live VM and clone it.
    2) Verify that the source VM supports migration.  If it does, proceed
       with the test.
    3) Start memory load on vm.
    4) Send a migration command to the source VM and collect statistics
       of migration speed.
    !) If migration speed is too high migration could be successful and then
       test ends with warning.
    5) Kill off both VMs.
    6) Print statistics of migration.

    :param test: kvm test object.
    :param params: Dictionary with test parameters.
    :param env: Dictionary with the test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    login_timeout = int(params.get("login_timeout", 360))
    session = vm.wait_for_login(timeout=login_timeout)

    mig_timeout = float(params.get("mig_timeout", "10"))
    mig_protocol = params.get("migration_protocol", "tcp")

    install_path = params.get("cpuflags_install_path", "/tmp")

    vm_mem = int(params.get("mem", "512"))

    # Parses the human-monitor "info migrate" output for transferred RAM.
    get_mig_speed = re.compile(r"^transferred ram: (\d+) kbytes$",
                               re.MULTILINE)

    mig_speed = params.get("mig_speed", "1G")
    mig_speed_accuracy = float(params.get("mig_speed_accuracy", "0.2"))
    clonevm = None

    def get_migration_statistic(vm):
        """
        Sample migration throughput once per second for up to 30 samples
        and return the collected Statistic object (values in MB/s).
        """
        last_transfer_mem = 0
        transfered_mem = 0
        mig_stat = Statistic()
        # Busy-wait until migration actually starts moving data.
        while vm.monitor.get_migrate_progress() == 0:
            pass
        for _ in range(30):
            o = vm.monitor.info("migrate")
            warning_msg = ("Migration already ended. Migration speed is"
                           " probably too high and will block vm while"
                           " filling its memory.")
            fail_msg = ("Could not determine the transferred memory from"
                        " monitor data: %s" % o)
            # Human monitor returns a string; QMP returns a dict.
            if isinstance(o, six.string_types):
                if "status: active" not in o:
                    test.error(warning_msg)
                try:
                    transfered_mem = int(get_mig_speed.search(o).groups()[0])
                except (IndexError, ValueError):
                    test.fail(fail_msg)
            else:
                if o.get("status") != "active":
                    test.error(warning_msg)
                try:
                    # QMP reports bytes; convert to kbytes to match the
                    # human-monitor path above.
                    transfered_mem = o.get("ram").get("transferred") / (1024)
                except (IndexError, ValueError):
                    test.fail(fail_msg)

            # Delta over the 1 s sampling interval, kbytes -> MB/s.
            real_mig_speed = (transfered_mem - last_transfer_mem) / 1024

            last_transfer_mem = transfered_mem

            logging.debug("Migration speed: %s MB/s" % (real_mig_speed))
            mig_stat.record(real_mig_speed)
            time.sleep(1)

        return mig_stat

    try:
        # Reboot the VM in the background
        cpuflags.install_cpuflags_util_on_vm(test, vm, install_path,
                                             extra_flags="-msse3 -msse2")

        qemu_migration.set_speed(vm, mig_speed)

        # Start a guest memory-stress workload so migration has dirty
        # pages to transfer continuously.
        cmd = ("%s/cpuflags-test --stressmem %d,%d" %
               (os.path.join(install_path, "cpu_flags", "src"),
                vm_mem * 4, vm_mem / 2))
        logging.debug("Sending command: %s" % (cmd))
        session.sendline(cmd)

        time.sleep(2)

        clonevm = vm.migrate(mig_timeout, mig_protocol,
                             not_wait_for_migration=True, env=env)

        # NOTE(review): mig_speed is rebound here from a size string
        # ("1G") to an integer number of MB.
        mig_speed = int(float(
            utils_misc.normalize_data_size(mig_speed, "M")))
        mig_stat = get_migration_statistic(vm)

        real_speed = mig_stat.get_average()
        ack_speed = mig_speed * mig_speed_accuracy

        logging.info("Target migration speed: %d MB/s.", mig_speed)
        logging.info(
            "Average migration speed: %d MB/s", mig_stat.get_average())
        logging.info("Minimum migration speed: %d MB/s", mig_stat.get_min())
        logging.info("Maximum migration speed: %d MB/s", mig_stat.get_max())

        logging.info("Maximum tolerable divergence: %3.1f%%",
                     mig_speed_accuracy * 100)

        if real_speed < mig_speed - ack_speed:
            divergence = (1 - float(real_speed) / float(mig_speed)) * 100
            test.error("Average migration speed (%s MB/s) "
                       "is %3.1f%% lower than target (%s MB/s)"
                       % (real_speed, divergence, mig_speed))

        if real_speed > mig_speed + ack_speed:
            divergence = (1 - float(mig_speed) / float(real_speed)) * 100
            test.error("Average migration speed (%s MB/s) "
                       "is %3.1f%% higher than target (%s MB/s)"
                       % (real_speed, divergence, mig_speed))
    finally:
        session.close()
        if clonevm:
            clonevm.destroy(gracefully=False)
        if vm:
            vm.destroy(gracefully=False)