def run(test, params, env):
    """
    Test virt-admin client-disconnect

    1) Start several virsh connections;
    2) disconnect some connections;
    3) check whether virsh gives out the correct error messages;
    4) check whether srv_clients_info gives out the correct info about
       the virsh clients.

    :param test: test object, used to report failures
    :param params: dict of test parameters
    :param env: test environment object (unused here)
    """
    num_clients = params.get("num_clients")
    server_name = params.get("server_name")
    # Mirror local/remote credentials into params so TLSConnection can use them
    server_ip = params["server_ip"] = params.get("local_ip")
    server_user = params["server_user"] = params.get("local_user", "root")
    server_pwd = params["server_pwd"] = params.get("local_pwd")
    client_ip = params["client_ip"] = params.get("remote_ip")
    client_pwd = params["client_pwd"] = params.get("remote_pwd")
    # Fix: this used to assign params["server_user"] a second time, silently
    # overwriting the local user stored above with the remote user.
    client_user = params["client_user"] = params.get("remote_user", "root")
    tls_port = params.get("tls_port", "16514")
    tls_uri = "qemu+tls://%s:%s/system" % (server_ip, tls_port)
    tls_obj = None
    remote_virsh_dargs = {'remote_ip': client_ip, 'remote_user': client_user,
                          'remote_pwd': client_pwd, 'uri': tls_uri,
                          'ssh_remote_auth': True}

    if not server_name:
        server_name = virt_admin.check_server_name()

    daemon = utils_libvirtd.Libvirtd("virtproxyd")

    try:
        # Set up TLS so the remote virsh clients can reach virtproxyd
        tls_obj = TLSConnection(params)
        tls_obj.conn_setup()
        tls_obj.auto_recover = True
        utils_iptables.Firewall_cmd().add_port(tls_port, 'tcp', permanent=True)

        clients_instant = []
        for _ in range(int(num_clients)):
            # Under split daemon mode, we can connect to virtproxyd via
            # remote tcp/tls connections,can not connect to virtproxyd direct
            # on local host
            clients_instant.append(virsh.VirshPersistent(**remote_virsh_dargs))

        # Last line of the listing is the most recently connected client
        out = virt_admin.srv_clients_list(server_name, ignore_status=True,
                                          debug=True)
        client_id = out.stdout_text.strip().splitlines()[-1].split()[0]
        result = virt_admin.client_disconnect(server_name, client_id,
                                              ignore_status=True, debug=True)
        if result.exit_status:
            test.fail("This operation should "
                      "success but failed. output: \n %s" % result)
        elif result.stdout.decode().strip().split()[1][1:-1] != client_id:
            # Output quotes the disconnected id; strip the quotes and compare
            test.fail("virt-admin did not "
                      "disconnect the correct client.")
    finally:
        daemon.restart()
        utils_iptables.Firewall_cmd().remove_port(tls_port, 'tcp',
                                                  permanent=True)
def setup_conn_obj(conn_type, params, test):
    """
    Build and initialize a connection object of the requested type.

    Only TLS is supported for now; any other type aborts the test via
    ``test.error``.

    :param conn_type: str, 'tls' for now
    :param params: dict, used to setup the connection
    :param test: test object
    :return: TLSConnection, the connection object (None if none was built)
    """
    kind = conn_type.upper()
    logging.debug("Begin to create new {} connection".format(kind))
    connection = None
    if kind == 'TLS':
        connection = TLSConnection(params)
        connection.auto_recover = True
        connection.conn_setup()
    else:
        test.error("Invalid parameter, only support 'tls'")
    return connection
def run(test, params, env):
    """
    Test for basic serial character device function.

    1) Define the VM with specified serial device and define result meets
       expectation
    2) Test whether defined XML meets expectation
    3) Start the guest and check if start result meets expectation
    4) Test the function of started serial device
    5) Shutdown the VM and check whether cleaned up properly
    6) Clean up environment

    :param test: test object, used to report failures/cancellation
    :param params: dict of test parameters
    :param env: test environment object holding the VM
    """
    def set_targets(serial):
        """
        Prepare a serial device XML according to parameters

        Chooses target model/type based on the host architecture
        (ppc, aarch64, s390x, else x86-style defaults).
        """
        machine = platform.machine()
        if "ppc" in machine:
            serial.target_model = 'spapr-vty'
            serial.target_type = 'spapr-vio-serial'
        elif "aarch" in machine:
            serial.target_model = 'pl011'
            serial.target_type = 'system-serial'
        elif "s390x" in machine and 'sclp' not in serial_type:
            if target_model:
                serial.target_model = target_model
            else:
                serial.target_model = 'sclpconsole'
            serial.target_type = 'sclp-serial'
        else:
            serial.target_model = target_type
            serial.target_type = target_type

    def prepare_spice_graphics_device():
        """
        Prepare a spice graphics device XML according to parameters
        (autoport with ports picked by libvirt).
        """
        graphic = Graphics(type_name='spice')
        graphic.autoport = "yes"
        graphic.port = "-1"
        graphic.tlsPort = "-1"
        return graphic

    def prepare_serial_device():
        """
        Prepare a serial device XML according to parameters.

        'tls' is modeled as a 'tcp' chardev; sources are parsed from the
        space-separated, comma/colon-encoded ``sources_str`` parameter.
        """
        local_serial_type = serial_type
        if serial_type == "tls":
            local_serial_type = "tcp"
        serial = librarian.get('serial')(local_serial_type)
        serial.target_port = "0"
        set_targets(serial)
        sources = []
        logging.debug(sources_str)
        # Each source looks like "key1:val1,key2:val2"
        for source_str in sources_str.split():
            source_dict = {}
            for att in source_str.split(','):
                key, val = att.split(':')
                source_dict[key] = val
            sources.append(source_dict)
        serial.sources = sources
        return serial

    def prepare_console_device():
        """
        Prepare a console device XML according to parameters
        (same source-string parsing as prepare_serial_device).
        """
        local_serial_type = serial_type
        if serial_type == "tls":
            local_serial_type = "tcp"
        console = librarian.get('console')(local_serial_type)
        console.target_type = console_target_type
        console.target_port = console_target_port
        sources = []
        logging.debug(sources_str)
        for source_str in sources_str.split():
            source_dict = {}
            for att in source_str.split(','):
                key, val = att.split(':')
                source_dict[key] = val
            sources.append(source_dict)
        console.sources = sources
        return console

    def define_and_check():
        """
        Predict the error message when defining and try to define the guest
        with testing serial device.

        :return: True when the define succeeded, False otherwise
        """
        fail_patts = []
        if serial_type in ['dev', 'file', 'pipe', 'unix'] and not any(
                ['path' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source path attribute for char device")
        if serial_type in ['tcp'] and not any(
                ['host' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source host attribute for char device")
        if serial_type in ['tcp', 'udp'] and not any(
                ['service' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source service attribute for char "
                              "device")
        if serial_type in ['spiceport'] and not any(
                ['channel' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source channel attribute for char "
                              "device")
        if serial_type in ['nmdm'] and not any(
                ['master' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing master path attribute for nmdm device")
        if serial_type in ['nmdm'] and not any(
                ['slave' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing slave path attribute for nmdm device")
        if serial_type in ['spicevmc']:
            fail_patts.append(r"spicevmc device type only supports virtio")
        vm_xml.undefine()
        res = vm_xml.virsh.define(vm_xml.xml)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def check_xml():
        """
        Predict the result serial device and generated console device
        and check the result domain XML against expectation
        """
        console_cls = librarian.get('console')

        local_serial_type = serial_type
        if serial_type == 'tls':
            local_serial_type = 'tcp'
        # Predict expected serial and console XML
        expected_console = console_cls(local_serial_type)

        if local_serial_type == 'udp':
            # udp sources with a service but no mode default to 'connect'
            sources = []
            for source in serial_dev.sources:
                if 'service' in source and 'mode' not in source:
                    source['mode'] = 'connect'
                sources.append(source)
        else:
            sources = serial_dev.sources
        expected_console.sources = sources

        if local_serial_type == 'tcp':
            # NOTE(review): this tests substring membership in the string
            # 'tcp', so it is always False and protocol_type is always "raw";
            # probably meant to check the device's attributes — confirm.
            if 'protocol_type' in local_serial_type:
                expected_console.protocol_type = serial_dev.protocol_type
            else:
                expected_console.protocol_type = "raw"
        expected_console.target_port = serial_dev.target_port
        # NOTE(review): the next assignment is unconditionally overwritten by
        # the one after it — the conditional currently has no effect.
        if 'target_type' in serial_dev:
            expected_console.target_type = serial_dev.target_type
        expected_console.target_type = console_target_type
        logging.debug("Expected console XML is:\n%s", expected_console)

        # Get current serial and console XML
        current_xml = VMXML.new_from_dumpxml(vm_name)
        serial_elem = current_xml.xmltreefile.find('devices/serial')
        console_elem = current_xml.xmltreefile.find('devices/console')
        if console_elem is None:
            test.fail("Expect generate console automatically, "
                      "but found none.")
        if serial_elem and console_target_type != 'serial':
            test.fail("Don't Expect exist serial device, "
                      "but found:\n%s" % serial_elem)

        cur_console = console_cls.new_from_element(console_elem)
        logging.debug("Current console XML is:\n%s", cur_console)
        # Compare current serial and console with oracle.
        if not expected_console == cur_console:
            # "==" has been override
            test.fail("Expect generate console:\n%s\nBut got:\n%s" %
                      (expected_console, cur_console))

        if console_target_type == 'serial':
            serial_cls = librarian.get('serial')
            expected_serial = serial_cls(local_serial_type)
            expected_serial.sources = sources
            set_targets(expected_serial)

            if local_serial_type == 'tcp':
                # NOTE(review): same always-False substring check as above.
                if 'protocol_type' in local_serial_type:
                    expected_serial.protocol_type = serial_dev.protocol_type
                else:
                    expected_serial.protocol_type = "raw"
            expected_serial.target_port = serial_dev.target_port
            if serial_elem is None:
                test.fail("Expect exist serial device, "
                          "but found none.")
            cur_serial = serial_cls.new_from_element(serial_elem)
            if target_type == 'pci-serial':
                if cur_serial.address is None:
                    test.fail("Expect serial device address is not assigned")
                else:
                    logging.debug("Serial address is: %s", cur_serial.address)
            logging.debug("Expected serial XML is:\n%s", expected_serial)
            logging.debug("Current serial XML is:\n%s", cur_serial)
            # Compare current serial and console with oracle.
            if (target_type != 'pci-serial' and machine_type != 'pseries' and
                    not expected_serial == cur_serial):
                # "==" has been override
                test.fail("Expect serial device:\n%s\nBut got:\n "
                          "%s" % (expected_serial, cur_serial))

    def prepare_serial_console():
        """
        Prepare serial console to connect with guest serial according to
        the serial type

        :return: a Console object, or None for unsupported serial types
        """
        is_server = console_type == 'server'
        if serial_type == 'unix':
            # Unix socket path should match SELinux label
            socket_path = '/var/lib/libvirt/qemu/virt-test'
            console = Console('unix', socket_path, is_server)
        elif serial_type == 'tls':
            host = '127.0.0.1'
            service = 5556
            console = Console('tls', (host, service), is_server,
                              custom_pki_path)
        elif serial_type == 'tcp':
            host = '127.0.0.1'
            service = 2445
            console = Console('tcp', (host, service), is_server)
        elif serial_type == 'udp':
            host = '127.0.0.1'
            service = 2445
            console = Console('udp', (host, service), is_server)
        elif serial_type == 'file':
            socket_path = '/var/log/libvirt/virt-test'
            console = Console('file', socket_path, is_server)
        elif serial_type == 'pipe':
            socket_path = '/tmp/virt-test'
            console = Console('pipe', socket_path, is_server)
        else:
            logging.debug("Serial type %s don't support console test yet.",
                          serial_type)
            console = None
        return console

    def check_qemu_cmdline():
        """
        Check if VM's qemu command line meets expectation.
        """
        cmdline = open('/proc/%s/cmdline' % vm.get_pid()).read()
        logging.debug('Qemu command line: %s', cmdline)
        # /proc cmdline arguments are NUL-separated
        options = cmdline.split('\x00')
        ser_opt = None
        con_opt = None
        ser_dev = None
        con_dev = None
        exp_ser_opt = None
        exp_ser_dev = None
        exp_con_opt = None
        exp_con_dev = None
        # Get serial and console options from qemu command line
        for idx, opt in enumerate(options):
            if opt == '-chardev':
                # options[idx+1] is the chardev spec, options[idx+3] the
                # matching -device spec that follows it
                if 'id=charserial' in options[idx + 1]:
                    ser_opt = options[idx + 1]
                    ser_dev = options[idx + 3]
                    logging.debug('Serial option is: %s', ser_opt)
                    logging.debug('Serial device option is: %s', ser_dev)
                if 'id=charconsole' in options[idx + 1]:
                    con_opt = options[idx + 1]
                    con_dev = options[idx + 3]
                    logging.debug('Console option is: %s', con_opt)
                    logging.debug('Console device option is: %s', con_dev)

        # Get expected serial and console options from params
        if serial_type == 'dev':
            ser_type = 'tty'
        elif serial_type in ['unix', 'tcp', 'tls']:
            ser_type = 'socket'
        else:
            ser_type = serial_type
        exp_ser_opts = [ser_type, 'id=charserial0']
        if serial_type in ['dev', 'file', 'pipe', 'unix']:
            for source in serial_dev.sources:
                if 'path' in source:
                    path = source['path']
            if serial_type == 'file':
                # Use re to make this fdset number flexible
                exp_ser_opts.append(r'path=/dev/fdset/\d+')
                exp_ser_opts.append('append=on')
            elif serial_type == 'unix':
                # Use re to make this fd number flexible
                exp_ser_opts.append(r'fd=\d+')
            else:
                exp_ser_opts.append('path=%s' % path)
        elif serial_type in ['tcp', 'tls']:
            for source in serial_dev.sources:
                if 'host' in source:
                    host = source['host']
                if 'service' in source:
                    port = source['service']
            exp_ser_opts.append('host=%s' % host)
            exp_ser_opts.append('port=%s' % port)
        elif serial_type in ['udp']:
            localaddr = ''
            localport = '0'
            for source in serial_dev.sources:
                if source['mode'] == 'connect':
                    if 'host' in source:
                        host = source['host']
                    if 'service' in source:
                        port = source['service']
                else:
                    if 'host' in source:
                        localaddr = source['host']
                    if 'service' in source:
                        localport = source['service']
            exp_ser_opts.append('host=%s' % host)
            exp_ser_opts.append('port=%s' % port)
            exp_ser_opts.append('localaddr=%s' % localaddr)
            exp_ser_opts.append('localport=%s' % localport)
        if serial_type in ['unix', 'tcp', 'udp', 'tls']:
            for source in serial_dev.sources:
                if 'mode' in source:
                    mode = source['mode']
            if mode == 'bind':
                exp_ser_opts.append('server')
                exp_ser_opts.append('nowait')
        if serial_type == 'tls':
            exp_ser_opts.append('tls-creds=objcharserial0_tls0')
        exp_ser_opt = ','.join(exp_ser_opts)

        arch = platform.machine()
        if "ppc" in arch:
            exp_ser_devs = ['spapr-vty', 'chardev=charserial0',
                            'reg=0x30000000']
            if libvirt_version.version_compare(3, 9, 0):
                exp_ser_devs.insert(2, 'id=serial0')
        elif "s390x" in arch:
            dev_type = 'sclpconsole'
            if target_model:
                dev_type = target_model
            exp_ser_devs = [dev_type, 'chardev=charserial0', 'id=serial0']
        else:
            logging.debug('target_type: %s', target_type)
            if target_type == 'pci-serial':
                exp_ser_devs = ['pci-serial', 'chardev=charserial0',
                                'id=serial0', r'bus=pci.\d+', r'addr=0x\d+']
            else:
                exp_ser_devs = ['isa-serial', 'chardev=charserial0',
                                'id=serial0']
        exp_ser_dev = ','.join(exp_ser_devs)

        # No serial chardev is expected in these cases
        if console_target_type != 'serial' or serial_type == 'spicevmc':
            exp_ser_opt = None
            exp_ser_dev = None
        logging.debug("exp_ser_opt: %s", exp_ser_opt)
        logging.debug("ser_opt: %s", ser_opt)

        # Check options against expectation
        if exp_ser_opt is not None and re.match(exp_ser_opt, ser_opt) is None:
            test.fail('Expect get qemu command serial option "%s", '
                      'but got "%s"' % (exp_ser_opt, ser_opt))
        if exp_ser_dev is not None and ser_dev is not None \
                and re.match(exp_ser_dev, ser_dev) is None:
            test.fail('Expect get qemu command serial device option "%s", '
                      'but got "%s"' % (exp_ser_dev, ser_dev))

        if console_target_type == 'virtio':
            exp_con_opts = [serial_type, 'id=charconsole1']
            exp_con_opt = ','.join(exp_con_opts)

            exp_con_devs = []
            if console_target_type == 'virtio':
                exp_con_devs.append('virtconsole')
            exp_con_devs += ['chardev=charconsole1', 'id=console1']
            exp_con_dev = ','.join(exp_con_devs)
            if con_opt != exp_con_opt:
                test.fail('Expect get qemu command console option "%s", '
                          'but got "%s"' % (exp_con_opt, con_opt))
            if con_dev != exp_con_dev:
                test.fail(
                    'Expect get qemu command console device option "%s", '
                    'but got "%s"' % (exp_con_dev, con_dev))

    def check_serial_console(console, username, password):
        """
        Check whether console works properly by read the result for read only
        console and login and emit a command for read write console.
        """
        if serial_type in ['file', 'tls']:
            _, output = console.read_until_output_matches(['.*[Ll]ogin:'])
        else:
            console.wait_for_login(username, password)
            status, output = console.cmd_status_output('pwd')
            logging.debug("Command status: %s", status)
            logging.debug("Command output: %s", output)

    def cleanup(objs_list):
        """
        Clean up test environment
        """
        if serial_type == 'file':
            if os.path.exists('/var/log/libvirt/virt-test'):
                os.remove('/var/log/libvirt/virt-test')

        # recovery test environment
        for obj in objs_list:
            obj.auto_recover = True
            del obj

    def get_console_type():
        """
        Check whether console should be started before or after guest starting.

        :return: 'server' when the guest connects out (we must listen first),
                 'client' when the guest listens, None for other types
        """
        if serial_type in ['tcp', 'unix', 'udp', 'tls']:
            for source in serial_dev.sources:
                if 'mode' in source and source['mode'] == 'connect':
                    return 'server'
            return 'client'
        elif serial_type in ['file']:
            return 'client'
        elif serial_type in ['pipe']:
            return 'server'

    serial_type = params.get('serial_dev_type', 'pty')
    sources_str = params.get('serial_sources', '')
    username = params.get('username')
    password = params.get('password')
    console_target_type = params.get('console_target_type', 'serial')
    target_type = params.get('target_type', 'isa-serial')
    target_model = params.get('target_model', '')
    console_target_port = params.get('console_target_port', '0')
    second_serial_console = params.get('second_serial_console', 'no') == 'yes'
    custom_pki_path = params.get('custom_pki_path', '/etc/pki/libvirt-chardev')
    auto_recover = params.get('auto_recover', 'no')
    client_pwd = params.get('client_pwd', None)
    server_pwd = params.get('server_pwd', None)
    machine_type = params.get('machine_type', '')
    remove_devices = params.get('remove_devices', 'serial,console').split(',')

    # Reject placeholder passwords left unconfigured
    # NOTE(review): test.fail already raises, so the surrounding `raise` never
    # executes; also the message interpolates the password value, not its name.
    args_list = [client_pwd, server_pwd]
    for arg in args_list:
        if arg and arg.count("ENTER.YOUR."):
            raise test.fail("Please assign a value for %s!" % arg)

    vm_name = params.get('main_vm', 'virt-tests-vm1')
    vm = env.get_vm(vm_name)
    # it's used to clean up TLS objs later
    objs_list = []
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    try:
        # Strip existing serial/console devices so ours are the only ones
        for device_type in remove_devices:
            vm_xml.remove_all_device_by_type(device_type)
        if serial_type == "tls":
            test_dict = dict(params)
            tls_obj = TLSConnection(test_dict)
            if auto_recover == "yes":
                objs_list.append(tls_obj)
            tls_obj.conn_setup(False, True, True)

        serial_dev = prepare_serial_device()
        if console_target_type == 'serial' or second_serial_console:
            logging.debug('Serial device:\n%s', serial_dev)
            vm_xml.add_device(serial_dev)
        if serial_type == "spiceport":
            spice_graphics_dev = prepare_spice_graphics_device()
            logging.debug('Spice graphics device:\n%s', spice_graphics_dev)
            vm_xml.add_device(spice_graphics_dev)

        console_dev = prepare_console_device()
        logging.debug('Console device:\n%s', console_dev)
        vm_xml.add_device(console_dev)
        if second_serial_console:
            # Negative scenario: only the first console may be a serial port
            console_target_type = 'serial'
            console_target_port = '1'
            console_dev = prepare_console_device()
            logging.debug('Console device:\n%s', console_dev)
            vm_xml.add_device(console_dev)
            vm_xml.undefine()
            res = virsh.define(vm_xml.xml)
            # NOTE(review): str.find returns -1 (truthy) when the message is
            # absent and 0 (falsy) when it is at the start — this condition
            # looks inverted relative to its intent; confirm against upstream.
            if res.stderr.find(r'Only the first console can be a serial port'):
                logging.debug("Test only the first console can be a serial"
                              "succeeded")
                return
            test.fail("Test only the first console can be serial failed.")

        console_type = get_console_type()

        if not define_and_check():
            logging.debug("Can't define the VM, exiting.")
            return
        if console_target_type != 'virtio':
            check_xml()

        expected_fails = []
        if serial_type == 'nmdm' and platform.system() != 'FreeBSD':
            expected_fails.append("unsupported chardev 'nmdm'")

        # Prepare console before start when console is server
        if console_type == 'server':
            console = prepare_serial_console()

        res = virsh.start(vm_name)
        libvirt.check_result(res, expected_fails, [])
        if res.exit_status:
            logging.debug("Can't start the VM, exiting.")
            return
        check_qemu_cmdline()

        # Prepare console after start when console is client
        if console_type == 'client':
            console = prepare_serial_console()

        if (console_type is not None and serial_type != 'tls' and
                console_type != 'server'):
            check_serial_console(console, username, password)

        vm.destroy()
    finally:
        cleanup(objs_list)
        vm_xml_backup.sync()
def run(test, params, env):
    """
    Test remote access with TCP, TLS connection

    Builds a libvirt connection URI from the test parameters, sets up the
    requested transports (SSH/TLS/TCP/UNIX, optionally IPv6 and SASL), runs
    the remote access check, and restores the environment afterwards.

    :param test: test object, used for cancel/fail and the log file
    :param params: dict of test parameters
    :param env: test environment object holding the VM
    """
    test_dict = dict(params)
    vm_name = test_dict.get("main_vm")
    status_error = test_dict.get("status_error", "no")
    allowed_dn_str = params.get("tls_allowed_dn_list")
    if allowed_dn_str:
        allowed_dn_list = []
        if not libvirt_version.version_compare(1, 0, 0):
            # Reverse the order in the dn list to workaround the
            # feature changes between RHEL 6 and RHEL 7
            dn_list = allowed_dn_str.split(",")
            dn_list.reverse()
            allowed_dn_str = ','.join(dn_list)
        allowed_dn_list.append(allowed_dn_str)
        test_dict['tls_allowed_dn_list'] = allowed_dn_list

    transport = test_dict.get("transport")
    plus = test_dict.get("conn_plus", "+")
    config_ipv6 = test_dict.get("config_ipv6", "no")
    tls_port = test_dict.get("tls_port", "")
    listen_addr = test_dict.get("listen_addr", "0.0.0.0")
    ssh_port = test_dict.get("ssh_port", "")
    tcp_port = test_dict.get("tcp_port", "")
    server_ip = test_dict.get("server_ip")
    server_user = test_dict.get("server_user")
    server_pwd = test_dict.get("server_pwd")
    no_any_config = params.get("no_any_config", "no")
    sasl_user_pwd = test_dict.get("sasl_user_pwd")
    sasl_allowed_users = test_dict.get("sasl_allowed_users")
    server_cn = test_dict.get("server_cn")
    custom_pki_path = test_dict.get("custom_pki_path")
    rm_client_key_cmd = test_dict.get("remove_client_key_cmd")
    rm_client_cert_cmd = test_dict.get("remove_client_cert_cmd")
    ca_cn_new = test_dict.get("ca_cn_new")
    no_verify = test_dict.get("no_verify", "no")
    ipv6_addr_des = test_dict.get("ipv6_addr_des")
    tls_sanity_cert = test_dict.get("tls_sanity_cert")
    restart_libvirtd = test_dict.get("restart_libvirtd", "yes")
    diff_virt_ver = test_dict.get("diff_virt_ver", "no")
    driver = test_dict.get("test_driver", "qemu")
    uri_path = test_dict.get("uri_path", "/system")
    virsh_cmd = params.get("virsh_cmd", "list")
    action = test_dict.get("libvirtd_action", "restart")
    uri_user = test_dict.get("uri_user", "")
    unix_sock_dir = test_dict.get("unix_sock_dir")
    mkdir_cmd = test_dict.get("mkdir_cmd")
    rmdir_cmd = test_dict.get("rmdir_cmd")
    adduser_cmd = test_dict.get("adduser_cmd")
    deluser_cmd = test_dict.get("deluser_cmd")
    auth_conf = test_dict.get("auth_conf")
    auth_conf_cxt = test_dict.get("auth_conf_cxt")
    polkit_pkla = test_dict.get("polkit_pkla")
    polkit_pkla_cxt = test_dict.get("polkit_pkla_cxt")
    ssh_setup = test_dict.get("ssh_setup", "no")
    tcp_setup = test_dict.get("tcp_setup", "no")
    tls_setup = test_dict.get("tls_setup", "no")
    unix_setup = test_dict.get("unix_setup", "no")
    ssh_recovery = test_dict.get("ssh_auto_recovery", "yes")
    tcp_recovery = test_dict.get("tcp_auto_recovery", "yes")
    tls_recovery = test_dict.get("tls_auto_recovery", "yes")
    unix_recovery = test_dict.get("unix_auto_recovery", "yes")

    port = ""
    # extra URI arguments
    extra_params = ""
    # it's used to clean up SSH, TLS, TCP, UNIX and SASL objs later
    objs_list = []
    # redirect LIBVIRT_DEBUG log into test log later
    test_dict["logfile"] = test.logfile

    # Make sure all of parameters are assigned a valid value
    check_parameters(test_dict, test)

    # only simply connect libvirt daemon then return
    if no_any_config == "yes":
        test_dict["uri"] = "%s%s%s://%s" % (driver, plus, transport, uri_path)
        remote_access(test_dict, test)
        return

    # append extra 'pkipath' argument to URI if exists
    if custom_pki_path:
        extra_params = "?pkipath=%s" % custom_pki_path

    # append extra 'no_verify' argument to URI if exists
    if no_verify == "yes":
        extra_params = "?no_verify=1"

    # append extra 'socket' argument to URI if exists
    if unix_sock_dir:
        extra_params = "?socket=%s/libvirt-sock" % unix_sock_dir

    # generate auth.conf and default under the '/etc/libvirt'
    if auth_conf_cxt and auth_conf:
        cmd = "echo -e '%s' > %s" % (auth_conf_cxt, auth_conf)
        process.system(cmd, ignore_status=True, shell=True)

    # generate polkit_pkla and default under the
    # '/etc/polkit-1/localauthority/50-local.d/'
    if polkit_pkla_cxt and polkit_pkla:
        cmd = "echo -e '%s' > %s" % (polkit_pkla_cxt, polkit_pkla)
        process.system(cmd, ignore_status=True, shell=True)

    # generate remote IP
    if config_ipv6 == "yes" and ipv6_addr_des:
        remote_ip = "[%s]" % ipv6_addr_des
    elif config_ipv6 != "yes" and server_cn:
        remote_ip = server_cn
    elif config_ipv6 != "yes" and ipv6_addr_des:
        remote_ip = "[%s]" % ipv6_addr_des
    elif server_ip and transport != "unix":
        remote_ip = server_ip
    else:
        remote_ip = ""

    # get URI port
    if tcp_port != "":
        port = ":" + tcp_port

    if tls_port != "":
        port = ":" + tls_port

    if ssh_port != "" and not ipv6_addr_des:
        port = ":" + ssh_port

    # generate URI
    uri = "%s%s%s://%s%s%s%s%s" % (driver, plus, transport, uri_user,
                                   remote_ip, port, uri_path, extra_params)
    test_dict["uri"] = uri

    logging.debug("The final test dict:\n<%s>", test_dict)

    # 'start' needs a defined domain on the remote side; cancel if missing
    if virsh_cmd == "start" and transport != "unix":
        session = remote.wait_for_login("ssh", server_ip, "22", "root",
                                        server_pwd, "#")
        cmd = "virsh domstate %s" % vm_name
        status, output = session.cmd_status_output(cmd)
        if status:
            session.close()
            test.cancel(output)
        session.close()

    try:
        # setup IPv6
        if config_ipv6 == "yes":
            ipv6_obj = IPv6Manager(test_dict)
            objs_list.append(ipv6_obj)
            ipv6_obj.setup()

        # compare libvirt version if needs
        if diff_virt_ver == "yes":
            compare_virt_version(server_ip, server_user, server_pwd, test)

        # setup SSH
        if transport == "ssh" or ssh_setup == "yes":
            if not test_dict.get("auth_pwd"):
                ssh_obj = SSHConnection(test_dict)
                if ssh_recovery == "yes":
                    objs_list.append(ssh_obj)
                # setup test environment
                ssh_obj.conn_setup()

        # setup TLS
        if transport == "tls" or tls_setup == "yes":
            tls_obj = TLSConnection(test_dict)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
            # reserve cert path
            tmp_dir = tls_obj.tmp_dir
            # setup test environment
            if tls_sanity_cert == "no":
                # only setup CA and client
                tls_obj.conn_setup(False, True)
            else:
                # setup CA, server and client
                tls_obj.conn_setup()

        # setup TCP
        if transport == "tcp" or tcp_setup == "yes":
            tcp_obj = TCPConnection(test_dict)
            if tcp_recovery == "yes":
                objs_list.append(tcp_obj)
            # setup test environment
            tcp_obj.conn_setup()

        # create a directory if needs
        if mkdir_cmd:
            process.system(mkdir_cmd, ignore_status=True, shell=True)

        # setup UNIX
        if transport == "unix" or unix_setup == "yes":
            unix_obj = UNIXConnection(test_dict)
            if unix_recovery == "yes":
                objs_list.append(unix_obj)
            # setup test environment
            unix_obj.conn_setup()

        # need to restart libvirt service for negative testing
        if restart_libvirtd == "no":
            remotely_control_libvirtd(server_ip, server_user,
                                      server_pwd, action, status_error)

        # check TCP/IP listening by service
        if restart_libvirtd != "no" and transport != "unix":
            service = 'libvirtd'
            if transport == "ssh":
                service = 'ssh'
            check_listening_port_remote_by_service(server_ip, server_user,
                                                   server_pwd, service,
                                                   port, listen_addr)

        # remove client certifications if exist, only for TLS negative testing
        if rm_client_key_cmd:
            process.system(rm_client_key_cmd, ignore_status=True, shell=True)

        if rm_client_cert_cmd:
            process.system(rm_client_cert_cmd, ignore_status=True, shell=True)

        # add user to specific group
        if adduser_cmd:
            process.system(adduser_cmd, ignore_status=True, shell=True)

        # change /etc/pki/libvirt/servercert.pem then
        # restart libvirt service on the remote host
        if tls_sanity_cert == "no" and ca_cn_new:
            test_dict['ca_cn'] = ca_cn_new
            test_dict['ca_cakey_path'] = tmp_dir
            test_dict['scp_new_cacert'] = 'no'
            tls_obj_new = TLSConnection(test_dict)
            test_dict['tls_obj_new'] = tls_obj_new
            # only setup new CA and server
            tls_obj_new.conn_setup(True, False)

        # setup SASL certification
        # From libvirt-3.2.0, the default sasl change from
        # DIGEST-MD5 to GSSAPI. "sasl_user" is discarded.
        # More details: https://libvirt.org/auth.html#ACL_server_kerberos
        if sasl_user_pwd and not libvirt_version.version_compare(3, 2, 0):
            # covert string tuple and list to python data type
            # NOTE(review): eval() on configuration values — acceptable only
            # because params come from the trusted test config, not user input.
            sasl_user_pwd = eval(sasl_user_pwd)
            if sasl_allowed_users:
                sasl_allowed_users = eval(sasl_allowed_users)

            # create a sasl user
            sasl_obj = SASL(test_dict)
            objs_list.append(sasl_obj)
            sasl_obj.setup()

            for sasl_user, sasl_pwd in sasl_user_pwd:
                # need't authentication if the auth.conf is configured by user
                if not auth_conf:
                    test_dict["auth_user"] = sasl_user
                    test_dict["auth_pwd"] = sasl_pwd
                    logging.debug("sasl_user, sasl_pwd = "
                                  "(%s, %s)", sasl_user, sasl_pwd)

                if sasl_allowed_users and sasl_user not in sasl_allowed_users:
                    test_dict["status_error"] = "yes"
                patterns_extra_dict = {"authentication name": sasl_user}
                test_dict["patterns_extra_dict"] = patterns_extra_dict
                remote_access(test_dict, test)
        else:
            remote_access(test_dict, test)

    finally:
        # recovery test environment
        # Destroy the VM after all test are done
        if vm_name:
            vm = env.get_vm(vm_name)
            if vm and vm.is_alive():
                vm.destroy(gracefully=False)

        if rmdir_cmd:
            process.system(rmdir_cmd, ignore_status=True, shell=True)

        if deluser_cmd:
            process.system(deluser_cmd, ignore_status=True, shell=True)

        if auth_conf and os.path.isfile(auth_conf):
            os.unlink(auth_conf)

        if polkit_pkla and os.path.isfile(polkit_pkla):
            os.unlink(polkit_pkla)

        cleanup(objs_list)
def run(test, params, env):
    """
    Test remote access with TCP, TLS connection.

    1) Prepare the requested connection environment (SSH/TLS/TCP/UNIX,
       SASL, IPv6, firewall, auth.conf, polkit) from the test parameters;
    2) Build the libvirt connection URI and exercise it via remote_access();
    3) Recover the whole test environment in the finally block.

    :param test: avocado test object (cancel/error/fail, logfile)
    :param params: dict of test parameters
    :param env: test environment object (used to look up the main VM)
    """
    test_dict = dict(params)
    pattern = test_dict.get("filter_pattern", "")
    # gnutls on RHEL < 8 does not understand the @LIBVIRT priority keyword
    if ('@LIBVIRT' in pattern and distro.detect().name == 'rhel' and
            int(distro.detect().version) < 8):
        test.cancel("The test {} is not supported on current OS({}{}<8.0) as "
                    "the keyword @LIBVIRT is not supported by gnutls on this "
                    "OS.".format(test.name, distro.detect().name,
                                 distro.detect().version))
    vm_name = test_dict.get("main_vm")
    status_error = test_dict.get("status_error", "no")
    allowed_dn_str = params.get("tls_allowed_dn_list")
    if allowed_dn_str:
        allowed_dn_list = []
        if not libvirt_version.version_compare(1, 0, 0):
            # Reverse the order in the dn list to workaround the
            # feature changes between RHEL 6 and RHEL 7
            dn_list = allowed_dn_str.split(",")
            dn_list.reverse()
            allowed_dn_str = ','.join(dn_list)
        allowed_dn_list.append(allowed_dn_str)
        test_dict['tls_allowed_dn_list'] = allowed_dn_list
    transport = test_dict.get("transport")
    plus = test_dict.get("conn_plus", "+")
    config_ipv6 = test_dict.get("config_ipv6", "no")
    tls_port = test_dict.get("tls_port", "")
    listen_addr = test_dict.get("listen_addr", "0.0.0.0")
    ssh_port = test_dict.get("ssh_port", "")
    tcp_port = test_dict.get("tcp_port", "")
    server_ip = test_dict.get("server_ip")
    server_user = test_dict.get("server_user")
    server_pwd = test_dict.get("server_pwd")
    no_any_config = params.get("no_any_config", "no")
    sasl_type = test_dict.get("sasl_type", "gssapi")
    sasl_user_pwd = test_dict.get("sasl_user_pwd")
    sasl_allowed_users = test_dict.get("sasl_allowed_users")
    server_cn = test_dict.get("server_cn")
    custom_pki_path = test_dict.get("custom_pki_path")
    rm_client_key_cmd = test_dict.get("remove_client_key_cmd")
    rm_client_cert_cmd = test_dict.get("remove_client_cert_cmd")
    ca_cn_new = test_dict.get("ca_cn_new")
    no_verify = test_dict.get("no_verify", "no")
    ipv6_addr_des = test_dict.get("ipv6_addr_des")
    tls_sanity_cert = test_dict.get("tls_sanity_cert")
    restart_libvirtd = test_dict.get("restart_libvirtd", "yes")
    diff_virt_ver = test_dict.get("diff_virt_ver", "no")
    driver = test_dict.get("test_driver", "qemu")
    uri_path = test_dict.get("uri_path", "/system")
    virsh_cmd = params.get("virsh_cmd", "list")
    action = test_dict.get("libvirtd_action", "restart")
    uri_user = test_dict.get("uri_user", "")
    uri_aliases = test_dict.get("uri_aliases", "")
    uri_default = test_dict.get("uri_default", "")
    unix_sock_dir = test_dict.get("unix_sock_dir")
    mkdir_cmd = test_dict.get("mkdir_cmd")
    rmdir_cmd = test_dict.get("rmdir_cmd")
    adduser_cmd = test_dict.get("adduser_cmd")
    deluser_cmd = test_dict.get("deluser_cmd")
    auth_conf = test_dict.get("auth_conf")
    auth_conf_cxt = test_dict.get("auth_conf_cxt")
    polkit_pkla = test_dict.get("polkit_pkla")
    polkit_pkla_cxt = test_dict.get("polkit_pkla_cxt")
    ssh_setup = test_dict.get("ssh_setup", "no")
    tcp_setup = test_dict.get("tcp_setup", "no")
    tls_setup = test_dict.get("tls_setup", "no")
    unix_setup = test_dict.get("unix_setup", "no")
    ssh_recovery = test_dict.get("ssh_auto_recovery", "yes")
    tcp_recovery = test_dict.get("tcp_auto_recovery", "yes")
    tls_recovery = test_dict.get("tls_auto_recovery", "yes")
    unix_recovery = test_dict.get("unix_auto_recovery", "yes")
    sasl_allowed_username_list = test_dict.get("sasl_allowed_username_list")
    auth_unix_rw = test_dict.get("auth_unix_rw")
    kinit_pwd = test_dict.get("kinit_pwd")
    test_alias = test_dict.get("test_alias")
    config_list = []
    port = ""
    # extra URI arguments
    extra_params = ""
    # it's used to clean up SSH, TLS, TCP, UNIX and SASL objs later
    objs_list = []
    # redirect LIBVIRT_DEBUG log into test log later
    test_dict["logfile"] = test.logfile

    # Make sure all of parameters are assigned a valid value
    check_parameters(test_dict, test)
    # Make sure libvirtd on remote is running
    server_session = remote.wait_for_login('ssh', server_ip, '22',
                                           server_user, server_pwd,
                                           r"[\#\$]\s*$")
    remote_libvirtd = Libvirtd(session=server_session)
    if not remote_libvirtd.is_running():
        logging.debug("start libvirt on remote")
        res = remote_libvirtd.start()
        if not res:
            status, output = server_session.cmd_status_output("journalctl -xe")
            test.error("Failed to start libvirtd on remote. [status]: %s "
                       "[output]: %s." % (status, output))
    server_session.close()
    if distro.detect().name == 'rhel' and int(distro.detect().version) >= 9:
        # Update crypto policies to legacy for RHEL>=9 per Bug 1931723 or
        # https://libguestfs.org/virt-v2v-input-xen.1.html#ssh-authentication
        crypto_policies = process.run("update-crypto-policies --set LEGACY",
                                      ignore_status=False)

    # only simply connect libvirt daemon then return
    if no_any_config == "yes":
        test_dict["uri"] = "%s%s%s://%s" % (driver, plus, transport, uri_path)
        remote_access(test_dict, test)
        return

    # NOTE: the extra URI arguments below are mutually exclusive by
    # parameter choice; a later assignment overwrites an earlier one.
    # append extra 'pkipath' argument to URI if exists
    if custom_pki_path:
        extra_params = "?pkipath=%s" % custom_pki_path

    # append extra 'no_verify' argument to URI if exists
    if no_verify == "yes":
        extra_params = "?no_verify=1"

    # append extra 'socket' argument to URI if exists
    if unix_sock_dir:
        extra_params = "?socket=%s/libvirt-sock" % unix_sock_dir

    # generate auth.conf and default under the '/etc/libvirt'
    if auth_conf_cxt and auth_conf:
        cmd = "echo -e '%s' > %s" % (auth_conf_cxt, auth_conf)
        process.system(cmd, ignore_status=True, shell=True)

    # generate polkit_pkla and default under the
    # '/etc/polkit-1/localauthority/50-local.d/'
    if polkit_pkla_cxt and polkit_pkla:
        cmd = "echo -e '%s' > %s" % (polkit_pkla_cxt, polkit_pkla)
        process.system(cmd, ignore_status=True, shell=True)

    # generate remote IP: prefer an IPv6 address (bracketed), then the
    # server CN, then the plain server IP; empty for UNIX transport
    if config_ipv6 == "yes" and ipv6_addr_des:
        remote_ip = "[%s]" % ipv6_addr_des
    elif config_ipv6 != "yes" and server_cn:
        remote_ip = server_cn
    elif config_ipv6 != "yes" and ipv6_addr_des:
        remote_ip = "[%s]" % ipv6_addr_des
    elif server_ip and transport != "unix":
        remote_ip = server_ip
    else:
        remote_ip = ""

    # get URI port (later assignments take precedence when several are set)
    if tcp_port != "":
        port = ":" + tcp_port

    if tls_port != "":
        port = ":" + tls_port

    if ssh_port != "" and not ipv6_addr_des:
        port = ":" + ssh_port

    # generate URI
    uri = "%s%s%s://%s%s%s%s%s" % (driver, plus, transport, uri_user,
                                   remote_ip, port, uri_path, extra_params)
    test_dict["uri"] = uri

    logging.debug("The final test dict:\n<%s>", test_dict)

    if virsh_cmd == "start" and transport != "unix":
        # the VM must already be defined on the remote host, otherwise cancel
        session = remote.wait_for_login("ssh", server_ip, "22",
                                        "root", server_pwd, "#")
        cmd = "virsh domstate %s" % vm_name
        status, output = session.cmd_status_output(cmd)
        if status:
            session.close()
            test.cancel(output)

        session.close()

    try:
        # setup IPv6
        if config_ipv6 == "yes":
            ipv6_obj = IPv6Manager(test_dict)
            objs_list.append(ipv6_obj)
            ipv6_obj.setup()

        # compare libvirt version if needs
        if diff_virt_ver == "yes":
            compare_virt_version(server_ip, server_user, server_pwd, test)

        # setup SSH
        if (transport == "ssh" or ssh_setup == "yes") and sasl_type != "plain":
            if not test_dict.get("auth_pwd"):
                ssh_obj = SSHConnection(test_dict)
                if ssh_recovery == "yes":
                    objs_list.append(ssh_obj)
                # setup test environment
                ssh_obj.conn_setup()
            else:
                # To access to server with password,
                # cleanup authorized_keys on remote
                ssh_pubkey_file = "/root/.ssh/id_rsa.pub"
                if (os.path.exists("/root/.ssh/id_rsa") and
                        os.path.exists(ssh_pubkey_file)):
                    remote_file_obj = remote.RemoteFile(address=server_ip,
                                                        client='scp',
                                                        username=server_user,
                                                        password=server_pwd,
                                                        port='22',
                                                        remote_path="/root/.ssh/authorized_keys")
                    with open(ssh_pubkey_file, 'r') as fd:
                        line = fd.read().split()[-1].rstrip('\n')
                    line = ".*" + line
                    remote_file_obj.remove([line])
                    objs_list.append(remote_file_obj)

        # setup TLS
        if transport == "tls" or tls_setup == "yes":
            tls_obj = TLSConnection(test_dict)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
            # reserve cert path
            tmp_dir = tls_obj.tmp_dir
            # setup test environment
            tls_obj.conn_setup()

        # setup TCP
        if transport == "tcp" or tcp_setup == "yes":
            tcp_obj = TCPConnection(test_dict)
            if tcp_recovery == "yes":
                objs_list.append(tcp_obj)
            # setup test environment
            tcp_obj.conn_setup()

        # create a directory if needs
        if mkdir_cmd:
            process.system(mkdir_cmd, ignore_status=True, shell=True)

        # setup UNIX
        if transport == "unix" or unix_setup == "yes" or sasl_type == "plain":
            unix_obj = UNIXConnection(test_dict)
            if unix_recovery == "yes":
                objs_list.append(unix_obj)
            # setup test environment
            unix_obj.conn_setup()

        # need to restart libvirt service for negative testing
        if restart_libvirtd == "no":
            remotely_control_libvirtd(server_ip, server_user,
                                      server_pwd, action, status_error)
        # check TCP/IP listening by service
        if restart_libvirtd != "no" and transport != "unix":
            service = 'libvirtd'
            if transport == "ssh":
                service = 'ssh'
            check_listening_port_remote_by_service(server_ip, server_user,
                                                   server_pwd, service,
                                                   port, listen_addr)

        # open the tls/tcp listening port on server
        # (port carries a leading ':' — strip it for firewall-cmd)
        if transport in ["tls", "tcp"]:
            firewalld_port = port[1:]
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            firewall_cmd = utils_iptables.Firewall_cmd(server_session)
            firewall_cmd.add_port(firewalld_port, 'tcp', permanent=True)
            server_session.close()

        # negative case: rebuild the URI with an invalid transport
        if 'inv_transport' in test_dict:
            transport = test_dict['inv_transport']
            uri = "%s%s%s://%s%s%s%s%s" % (driver, plus, transport, uri_user,
                                           remote_ip, port, uri_path,
                                           extra_params)
            test_dict["uri"] = uri

        config_list = []
        if uri_aliases:
            uri_config = change_libvirtconf_on_client(
                {'uri_aliases': uri_aliases})
            config_list.append(uri_config)
            test_dict["uri"] = test_alias

        if uri_default:
            test_dict["uri"] = ""
            # Delete the default URI environment variable to prevent overriding
            del os.environ['LIBVIRT_DEFAULT_URI']
            uri_config = change_libvirtconf_on_client(
                {'uri_default': uri_default})
            config_list.append(uri_config)
            ret = virsh.command('uri', debug=True)
            if uri_default.strip('"') in ret.stdout_text:
                logging.debug("Virsh output as expected.")
            else:
                test.fail("Expected virsh output: {} not found in:{}".format(
                    uri_default.strip('"'), ret.stdout_text))

        # remove client certifications if exist, only for TLS negative testing
        if rm_client_key_cmd:
            process.system(rm_client_key_cmd, ignore_status=True, shell=True)

        if rm_client_cert_cmd:
            process.system(rm_client_cert_cmd, ignore_status=True, shell=True)

        # add user to specific group
        if adduser_cmd:
            process.system(adduser_cmd, ignore_status=True, shell=True)

        # change /etc/pki/libvirt/servercert.pem then
        # restart libvirt service on the remote host
        if tls_sanity_cert == "no" and ca_cn_new:
            test_dict['ca_cn'] = ca_cn_new
            test_dict['scp_new_cacert'] = 'no'
            tls_obj_new = TLSConnection(test_dict)
            test_dict['tls_obj_new'] = tls_obj_new
            # only setup new CA and server
            tls_obj_new.conn_setup(True, False)

        # obtain and cache a ticket
        if kinit_pwd and sasl_type == 'gssapi' and auth_unix_rw == 'sasl':
            username_list = json.loads(sasl_allowed_username_list)
            for username in username_list:
                kinit_cmd = "echo '%s' | kinit %s" % (kinit_pwd, username)
                process.system(kinit_cmd, ignore_status=True, shell=True)

        # setup SASL certification
        # From libvirt-3.2.0, the default sasl change from
        # DIGEST-MD5 to GSSAPI. "sasl_user" is discarded.
        # More details: https://libvirt.org/auth.html#ACL_server_kerberos
        if sasl_user_pwd and sasl_type in ['digest-md5', 'plain']:
            # covert string tuple and list to python data type
            sasl_user_pwd = eval(sasl_user_pwd)
            if sasl_allowed_users:
                sasl_allowed_users = eval(sasl_allowed_users)

            # create a sasl user
            sasl_obj = SASL(test_dict)
            objs_list.append(sasl_obj)
            sasl_obj.setup()

            for sasl_user, sasl_pwd in sasl_user_pwd:
                # need't authentication if the auth.conf is configured by user
                if not auth_conf:
                    if sasl_type == 'plain':
                        test_dict["auth_pwd"] = server_pwd
                        pass
                    else:
                        test_dict["auth_user"] = sasl_user
                        test_dict["auth_pwd"] = sasl_pwd
                    logging.debug("sasl_user, sasl_pwd = "
                                  "(%s, %s)", sasl_user, sasl_pwd)

                # a user outside the allow list is expected to fail
                if sasl_allowed_users and sasl_user not in sasl_allowed_users:
                    test_dict["status_error"] = "yes"
                patterns_extra_dict = {"authentication name": sasl_user,
                                       "enter your password": sasl_pwd}
                test_dict["patterns_extra_dict"] = patterns_extra_dict
                remote_access(test_dict, test)
        else:
            if not uri_default:
                remote_access(test_dict, test)
    finally:
        # recovery test environment
        # Destroy the VM after all test are done
        for config in config_list:
            restore_libvirtconf_on_client(config)
        cleanup(objs_list)
        if vm_name:
            vm = env.get_vm(vm_name)
            if vm and vm.is_alive():
                vm.destroy(gracefully=False)
        # firewalld_port only exists when the tls/tcp branch above ran,
        # hence the locals() guard
        if transport in ["tcp", "tls"] and 'firewalld_port' in locals():
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            firewall_cmd = utils_iptables.Firewall_cmd(server_session)
            firewall_cmd.remove_port(firewalld_port, 'tcp', permanent=True)
            server_session.close()

        if rmdir_cmd:
            process.system(rmdir_cmd, ignore_status=True, shell=True)

        if deluser_cmd:
            process.system(deluser_cmd, ignore_status=True, shell=True)

        if auth_conf and os.path.isfile(auth_conf):
            os.unlink(auth_conf)

        if polkit_pkla and os.path.isfile(polkit_pkla):
            os.unlink(polkit_pkla)
def run(test, params, env):
    """
    Test virsh migrate command.

    1) Build the migrate option string and the src/dest URIs from params;
    2) Prepare the environment (TLS, ports, shared disk, graphics passwd,
       vm state/persistency, stress for postcopy, --xml/--persistent-xml);
    3) Do the uni-direction migration (precopy, or postcopy via
       "virsh migrate-postcopy");
    4) Check vm state, persistency, xml and connectivity on both the
       source and the destination after migration;
    5) Clean up both hosts in the finally block and fail if any check
       point did not pass.

    :param test: avocado test object (cancel/error/fail)
    :param params: dict of test parameters
    :param env: test environment object (used to look up the main VM)
    """

    def cleanup_vm(vm, vm_name='', uri=''):
        """
        Clean up vm in the src or destination host environment
        when doing the uni-direction migration.
        """
        # Backup vm name and uri
        uri_bak = vm.connect_uri
        vm_name_bak = vm.name

        # Destroy and undefine vm
        vm.connect_uri = uri if uri else uri_bak
        vm.name = vm_name if vm_name else vm_name_bak
        logging.info("Cleaning up VM %s on %s", vm.name, vm.connect_uri)
        if vm.is_alive():
            vm.destroy()
        if vm.is_persistent():
            vm.undefine()

        # Restore vm connect_uri
        vm.connect_uri = uri_bak
        vm.name = vm_name_bak

    # Check whether there are unset parameters
    for v in list(itervalues(params)):
        if isinstance(v, string_types) and v.count("EXAMPLE"):
            test.cancel("Please set real value for %s" % v)

    # Params for virsh migrate options:
    live_migration = params.get("live_migration") == "yes"
    offline_migration = params.get("offline_migration") == "yes"
    persistent = params.get("persistent") == "yes"
    undefinesource = params.get("undefinesource") == "yes"
    p2p = params.get("p2p") == "yes"
    tunnelled = params.get("tunnelled") == "yes"
    postcopy = params.get("postcopy") == "yes"
    dname = params.get("dname")
    xml_option = params.get("xml_option") == "yes"
    persistent_xml_option = params.get("persistent_xml_option") == "yes"
    extra_options = params.get("virsh_migrate_extra", "")

    # Append each requested flag unless the caller already put it in
    # virsh_migrate_extra
    if live_migration and not extra_options.count("--live"):
        extra_options = "%s --live" % extra_options
    if offline_migration and not extra_options.count("--offline"):
        extra_options = "%s --offline" % extra_options
    if persistent and not extra_options.count("--persistent"):
        extra_options = "%s --persistent" % extra_options
    if undefinesource and not extra_options.count("--undefinesource"):
        extra_options = "%s --undefinesource" % extra_options
    if p2p and not extra_options.count("--p2p"):
        extra_options = "%s --p2p" % extra_options
    if tunnelled and not extra_options.count("--tunnelled"):
        extra_options = "%s --tunnelled" % extra_options
    # --tunnelled requires --p2p
    if tunnelled and not extra_options.count("--p2p"):
        extra_options = "%s --p2p" % extra_options
    if postcopy and not extra_options.count("--postcopy"):
        extra_options = "%s --postcopy" % extra_options
    if dname and not extra_options.count("--dname"):
        extra_options = "%s --dname %s" % (extra_options, dname)
    # NOTE: --xml/--persistent-xml themselves are appended later, once the
    # updated xml files exist; --persistent-xml additionally implies
    # --persistent
    if persistent_xml_option and not extra_options.count("--persistent"):
        extra_options = "%s --persistent" % extra_options

    # Set param migrate_options in case it is used somewhere:
    params.setdefault("migrate_options", extra_options)

    # Params for postcopy migration
    postcopy_timeout = int(params.get("postcopy_migration_timeout", "180"))

    # Params for migrate hosts:
    server_cn = params.get("server_cn")
    client_cn = params.get("client_cn")
    migrate_source_host = client_cn if client_cn else params.get(
        "migrate_source_host")
    migrate_dest_host = server_cn if server_cn else params.get(
        "migrate_dest_host")

    # Params for migrate uri
    transport = params.get("transport", "tls")
    transport_port = params.get("transport_port")
    uri_port = ":%s" % transport_port if transport_port else ''
    hypervisor_driver = params.get("hypervisor_driver", "qemu")
    hypervisor_mode = params.get("hypervisor_mode", 'system')
    if "virsh_migrate_desturi" not in list(params.keys()):
        params["virsh_migrate_desturi"] = "%s+%s://%s%s/%s" % (
            hypervisor_driver, transport, migrate_dest_host, uri_port,
            hypervisor_mode)
    if "virsh_migrate_srcuri" not in list(params.keys()):
        params["virsh_migrate_srcuri"] = "%s:///%s" % (hypervisor_driver,
                                                       hypervisor_mode)
    dest_uri = params.get("virsh_migrate_desturi")
    src_uri = params.get("virsh_migrate_srcuri")

    # Params for src vm cfg:
    src_vm_cfg = params.get("src_vm_cfg")
    src_vm_status = params.get("src_vm_status")
    with_graphic_passwd = params.get("with_graphic_passwd")
    graphic_passwd = params.get("graphic_passwd")

    # For test result check: checks below log errors and clear this flag
    # instead of failing immediately, so every check point runs
    result_check_pass = True

    # Objects(SSH, TLS and TCP, etc) to be cleaned up in finally
    objs_list = []

    # VM objects for migration test
    vms = []

    try:
        # Get a MigrationTest() Object
        logging.debug("Get a MigrationTest() object")
        obj_migration = migration.MigrationTest()

        # Setup libvirtd remote connection TLS connection env
        if transport == "tls":
            tls_obj = TLSConnection(params)
            # Setup CA, server(on dest host) and client(on src host)
            tls_obj.conn_setup()
            # Add tls_obj to objs_list for cleanup in finally
            objs_list.append(tls_obj)

        # Enable libvirtd remote connection transport port
        if transport == 'tls':
            transport_port = '16514'
        elif transport == 'tcp':
            transport_port = '16509'
        obj_migration.migrate_pre_setup(dest_uri, params, ports=transport_port)

        # Back up vm name for recovery in finally
        vm_name_backup = params.get("migrate_main_vm")

        # Get a vm object for migration
        logging.debug("Get a vm object for migration")
        vm = env.get_vm(vm_name_backup)

        # Back up vm xml for recovery in finally
        logging.debug("Backup vm xml before migration")
        vm_xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        if not vm_xml_backup:
            test.error("Backing up xmlfile failed.")

        # Prepare shared disk in vm xml for live migration:
        # Change the source of the first disk of vm to shared disk
        if live_migration:
            logging.debug("Prepare shared disk in vm xml for live migration")
            storage_type = params.get("storage_type")
            if storage_type == 'nfs':
                logging.debug("Prepare nfs shared disk in vm xml")
                nfs_mount_dir = params.get("nfs_mount_dir")
                libvirt.update_vm_disk_source(vm.name, nfs_mount_dir)
                libvirt.update_vm_disk_driver_cache(vm.name,
                                                    driver_cache="none")
            else:
                # TODO: Other storage types
                test.cancel("Other storage type is not supported for now")

        # Prepare graphic password in vm xml
        if with_graphic_passwd in ["yes", "no"]:
            logging.debug("Set VM graphic passwd in vm xml")
            # Get graphics list in vm xml
            vmxml_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
            graphics_list = vmxml_tmp.get_graphics_devices

            if not graphics_list:
                # Add spice graphic with passwd to vm xml
                logging.debug("Add spice graphic to vm xml")
                graphics.Graphics.add_graphic(vm.name, graphic_passwd, "spice")
            elif graphic_passwd:
                # Graphics already exist in vm xml and passwd is required
                # Add passwd to the first graphic device in vm xml
                logging.debug("Add graphic passwd to vm xml")
                vm_xml.VMXML.add_security_info(vmxml_tmp, graphic_passwd)
                vmxml_tmp.sync()
            else:
                # Graphics already exist in vm xml and non-passwd is required
                # Do nothing here as passwd has been removed by
                # new_from_inactive_dumpxml()
                pass

        # Prepare for required src vm status.
        logging.debug("Turning %s into certain state.", vm.name)
        if src_vm_status == "running" and not vm.is_alive():
            vm.start()
        elif src_vm_status == "shut off" and not vm.is_dead():
            vm.destroy()

        # Prepare for required src vm persistency.
        logging.debug("Prepare for required src vm persistency")
        if src_vm_cfg == "persistent" and not vm.is_persistent():
            logging.debug("Make src vm persistent")
            vm_xml_backup.define()
        elif src_vm_cfg == "transient" and vm.is_persistent():
            logging.debug("Make src vm transient")
            vm.undefine()

        # Prepare for postcopy migration: install and run stress in VM so
        # the migration does not converge before switching to postcopy
        if postcopy and src_vm_status == "running":
            logging.debug(
                "Install and run stress in vm for postcopy migration")
            pkg_name = 'stress'

            # Get a vm session
            logging.debug("Get a vm session")
            vm_session = vm.wait_for_login()
            if not vm_session:
                test.error("Can't get a vm session successfully")

            # Install package stress if it is not installed in vm
            logging.debug(
                "Check if stress tool is installed for postcopy migration")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            # Run stress in vm
            logging.debug("Run stress in vm")
            stress_args = params.get("stress_args")
            vm_session.cmd('stress %s' % stress_args)

        # Prepare for --xml <updated_xml_file>.
        if xml_option:
            logging.debug("Preparing new xml file for --xml option.")
            # Get the vm xml
            vmxml_tmp = vm_xml.VMXML.new_from_dumpxml(
                vm.name, "--security-info --migratable")
            # Update something in the xml file: e.g. title
            # Note: VM ABI shall not be broken when migrating with updated_xml
            updated_title = "VM Title in updated xml"
            vmxml_tmp.title = updated_title
            # Add --xml to migrate extra_options
            extra_options = ("%s --xml=%s" % (extra_options, vmxml_tmp.xml))

        # Prepare for --persistent-xml <updated_xml_file>.
        if persistent_xml_option:
            logging.debug(
                "Preparing new xml file for --persistent-xml option.")
            # Get the vm xml
            vmxml_persist_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(
                vm.name, "--security-info")
            # Update something in the xml file: e.g. title
            # Note: VM ABI shall not be broken when migrating with updated_xml
            updated_persist_title = "VM Title in updated persist xml"
            vmxml_persist_tmp.title = updated_persist_title
            # Add --persistent-xml to migrate extra_options
            extra_options = ("%s --persistent-xml=%s" %
                             (extra_options, vmxml_persist_tmp.xml))

        # Prepare host env: clean up vm on dest host
        logging.debug("Clean up vm on dest host before migration")
        if dname:
            cleanup_vm(vm, dname, dest_uri)
        else:
            cleanup_vm(vm, vm.name, dest_uri)

        # Prepare host env: set selinux state before migration
        logging.debug("Set selinux to enforcing before migration")
        utils_selinux.set_status(params.get("selinux_state", "enforcing"))

        # Check vm network connectivity by ping before migration
        logging.debug("Check vm network before migration")
        if src_vm_status == "running":
            obj_migration.ping_vm(vm, params)

        # Get VM uptime before migration
        if src_vm_status == "running":
            vm_uptime = vm.uptime()
            logging.info("Check VM uptime before migration: %s", vm_uptime)

        # Print vm active xml before migration
        process.system_output("virsh dumpxml %s --security-info" %
                              vm.name, shell=True)

        # Print vm inactive xml before migration
        process.system_output("virsh dumpxml %s --security-info --inactive" %
                              vm.name, shell=True)

        # Do uni-direction migration.
        # NOTE: vm.connect_uri will be set to dest_uri once migration
        # is complete successfully
        logging.debug("Start to do migration test.")
        vms.append(vm)
        if postcopy:
            # Monitor the qemu monitor event of "postcopy-active" for
            # postcopy migration
            logging.debug(
                "Monitor the qemu monitor event for postcopy migration")
            virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC,
                                               auto_close=True)
            cmd = ("qemu-monitor-event --loop --domain %s --event MIGRATION"
                   % vm.name)
            virsh_session.sendline(cmd)

            # Do live migration and switch to postcopy by
            # "virsh migrate-postcopy"
            logging.debug("Start to do postcopy migration")
            obj_migration.do_migration(vms, src_uri, dest_uri, "orderly",
                                       options="",
                                       thread_timeout=postcopy_timeout,
                                       ignore_status=True,
                                       func=virsh.migrate_postcopy,
                                       extra_opts=extra_options,
                                       shell=True)
            # Check migration result
            obj_migration.check_result(obj_migration.ret, params)

            # Check "postcopy-active" event after postcopy migration
            logging.debug(
                "Check postcopy-active event after postcopy migration")
            virsh_session.send_ctrl("^C")
            events_output = virsh_session.get_stripped_output()
            logging.debug("events_output are %s", events_output)
            # Close the monitoring session before evaluating the result so
            # it is released even on failure.  (Fix: the original called
            # close() after test.fail(), which raises, so that close was
            # unreachable and was followed by a duplicated close() call.)
            virsh_session.close()
            pattern = "postcopy-active"
            if not re.search(pattern, events_output):
                test.fail("Migration didn't switch to postcopy mode")
        else:
            logging.debug("Start to do precopy migration")
            obj_migration.do_migration(vms, src_uri, dest_uri, "orderly",
                                       options="", ignore_status=True,
                                       extra_opts=extra_options)
            # Check migration result
            obj_migration.check_result(obj_migration.ret, params)

        # Check src vm after migration
        # First, update vm name and connect_uri to src vm's
        vm.name = vm_name_backup
        vm.connect_uri = src_uri
        logging.debug("Start to check %s state on src %s after migration.",
                      vm.name, src_uri)

        # Check src vm status after migration: existence, running,
        # shutoff, etc
        logging.debug("Check vm status on source after migration")
        if offline_migration:
            if src_vm_status == "shut off" and undefinesource:
                if vm.exists():
                    result_check_pass = False
                    logging.error(
                        "Src vm should not exist after offline migration"
                        " with --undefinesource")
                    logging.debug("Src vm state is %s" % vm.state())
            elif not libvirt.check_vm_state(
                    vm.name, src_vm_status, uri=vm.connect_uri):
                result_check_pass = False
                logging.error("Src vm should be %s after offline migration"
                              % src_vm_status)
                logging.debug("Src vm state is %s" % vm.state())
        if live_migration:
            if not undefinesource and src_vm_cfg == "persistent":
                if not libvirt.check_vm_state(
                        vm.name, "shut off", uri=vm.connect_uri):
                    result_check_pass = False
                    logging.error(
                        "Src vm should be shutoff after live migration")
                    logging.debug("Src vm state is %s" % vm.state())
            elif vm.exists():
                result_check_pass = False
                logging.error("Src vm should not exist after live migration")
                logging.debug("Src vm state is %s" % vm.state())

        # Check src vm status after migration: persistency
        logging.debug("Check vm persistency on source after migration")
        if src_vm_cfg == "persistent" and not undefinesource:
            if not vm.is_persistent():
                # Src vm should be persistent after migration without
                # --undefinesource
                result_check_pass = False
                logging.error("Src vm should be persistent after migration")
        elif vm.is_persistent():
            result_check_pass = False
            logging.error("Src vm should not be persistent after migration")

        # Check dst vm after migration
        # First, update vm name and connect_uri to dst vm's
        vm.name = dname if dname else vm.name
        vm.connect_uri = dest_uri
        logging.debug("Start to check %s state on target %s after migration.",
                      vm.name, vm.connect_uri)

        # Check dst vm status after migration: running, shutoff, etc
        logging.debug("Check vm status on target after migration")
        if live_migration:
            if not libvirt.check_vm_state(
                    vm.name, src_vm_status, uri=vm.connect_uri):
                result_check_pass = False
                logging.error("Dst vm should be %s after live migration",
                              src_vm_status)
        elif vm.is_alive():
            result_check_pass = False
            logging.error("Dst vm should not be alive after offline migration")

        # Print vm active xml after migration
        process.system_output("virsh -c %s dumpxml %s --security-info" %
                              (vm.connect_uri, vm.name), shell=True)

        # Print vm inactive xml after migration
        process.system_output(
            "virsh -c %s dumpxml %s --security-info --inactive" %
            (vm.connect_uri, vm.name), shell=True)

        # Check dst vm xml after migration
        logging.debug("Check vm xml on target after migration")
        remote_virsh = virsh.Virsh(uri=vm.connect_uri)
        vmxml_active_tmp = vm_xml.VMXML.new_from_dumpxml(
            vm.name, "--security-info", remote_virsh)
        vmxml_inactive_tmp = vm_xml.VMXML.new_from_inactive_dumpxml(
            vm.name, "--security-info", remote_virsh)

        # Check dst vm xml after migration: --xml <updated_xml_file>
        if xml_option and not offline_migration:
            logging.debug("Check vm active xml for --xml")
            if not vmxml_active_tmp.title == updated_title:
                print("vmxml active tmp title is %s" % vmxml_active_tmp.title)
                result_check_pass = False
                logging.error("--xml doesn't take effect in migration")
        if xml_option and offline_migration:
            logging.debug("Check vm inactive xml for --xml")
            if not vmxml_active_tmp.title == updated_title:
                result_check_pass = False
                logging.error("--xml doesn't take effect in migration")

        # Check dst vm xml after migration:
        # --persistent-xml <updated_xml_file>
        if persistent_xml_option:
            logging.debug("Check vm inactive xml for --persistent-xml")
            if (not offline_migration and
                    not vmxml_inactive_tmp.title == updated_persist_title):
                print("vmxml inactive tmp title is %s"
                      % vmxml_inactive_tmp.title)
                result_check_pass = False
                logging.error(
                    "--persistent-xml doesn't take effect in live migration")
            elif (offline_migration and
                    vmxml_inactive_tmp.title == updated_persist_title):
                result_check_pass = False
                logging.error(
                    "--persistent-xml should not take effect in offline "
                    "migration")

        # Check dst vm xml after migration: graphic passwd
        if with_graphic_passwd == "yes":
            logging.debug("Check graphic passwd in vm xml after migration")
            graphic_active = vmxml_active_tmp.devices.by_device_tag(
                'graphics')[0]
            graphic_inactive = vmxml_inactive_tmp.devices.by_device_tag(
                'graphics')[0]
            try:
                logging.debug("Check graphic passwd in active vm xml")
                if graphic_active.passwd != graphic_passwd:
                    result_check_pass = False
                    logging.error(
                        "Graphic passwd in active xml of dst vm should be %s",
                        graphic_passwd)

                logging.debug("Check graphic passwd in inactive vm xml")
                if graphic_inactive.passwd != graphic_passwd:
                    result_check_pass = False
                    logging.error(
                        "Graphic passwd in inactive xml of dst vm should "
                        "be %s", graphic_passwd)
            except LibvirtXMLNotFoundError:
                # accessing .passwd raises when the element is absent
                result_check_pass = False
                logging.error("Graphic passwd lost in dst vm xml")

        # Check dst vm uptime, network, etc after live migration
        if live_migration:
            # Check dst VM uptime after migration
            # Note: migrated_vm_uptime should be greater than the vm_uptime
            # got before migration
            migrated_vm_uptime = vm.uptime(connect_uri=dest_uri)
            logging.info("Check VM uptime in destination after "
                         "migration: %s", migrated_vm_uptime)
            if not migrated_vm_uptime:
                result_check_pass = False
                logging.error("Failed to check vm uptime after migration")
            elif vm_uptime > migrated_vm_uptime:
                result_check_pass = False
                logging.error(
                    "VM went for a reboot while migrating to destination")

            # Check dst VM network connectivity after migration
            logging.debug("Check VM network connectivity after migrating")
            obj_migration.ping_vm(vm, params, uri=dest_uri)

            # Restore vm.connect_uri as it is set to src_uri in ping_vm()
            logging.debug(
                "Restore vm.connect_uri as it is set to src_uri in ping_vm()")
            vm.connect_uri = dest_uri

        # Check dst vm status after migration: persistency
        logging.debug("Check vm persistency on target after migration")
        if persistent:
            if not vm.is_persistent():
                result_check_pass = False
                logging.error("Dst vm should be persistent after migration "
                              "with --persistent")
            time.sleep(10)
            # Destroy vm and check vm state should be shutoff. BZ#1076354
            vm.destroy()
            if not libvirt.check_vm_state(
                    vm.name, "shut off", uri=vm.connect_uri):
                result_check_pass = False
                logging.error(
                    "Dst vm with name %s should exist and be shutoff",
                    vm.name)
        elif vm.is_persistent():
            result_check_pass = False
            logging.error("Dst vm should not be persistent after migration "
                          "without --persistent")

    finally:
        logging.debug("Start to clean up env")
        # Clean up vm on dest and src host
        for vm in vms:
            cleanup_vm(vm, vm_name=dname, uri=dest_uri)
            cleanup_vm(vm, vm_name=vm_name_backup, uri=src_uri)

        # Recover source vm defination (just in case).
        logging.info("Recover vm defination on source")
        if vm_xml_backup:
            vm_xml_backup.define()

        # Clean up SSH, TCP, TLS test env
        if objs_list and len(objs_list) > 0:
            logging.debug("Clean up test env: SSH, TCP, TLS, etc")
            for obj in objs_list:
                obj.auto_recover = True
                obj.__del__()

        # Disable libvirtd remote connection transport port
        obj_migration.migrate_pre_setup(dest_uri, params, cleanup=True,
                                        ports=transport_port)

        # Check test result.
        if not result_check_pass:
            test.fail("Migration succeed, but some check points didn't pass."
                      "Please check the error log for details")
def run(test, params, env):
    """
    Test virsh migrate command.

    Covers pre-migration guest preparation (TLS, libvirtd/qemu.conf tweaks,
    PPC hpt/htm features, pty channel, controllers, stress load), the
    migration itself (sync or async via MigrationTest), and post-migration
    checks (domjobinfo, logs, target guest XML, network), with full
    environment recovery in the finally block.

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """

    def set_feature(vmxml, feature, value):
        """
        Set guest features for PPC

        :param state: the htm status
        :param vmxml: guest xml
        """
        features_xml = vm_xml.VMFeaturesXML()
        if feature == 'hpt':
            features_xml.hpt_resizing = value
        elif feature == 'htm':
            features_xml.htm = value
        vmxml.features = features_xml
        vmxml.sync()

    def trigger_hpt_resize(session):
        """
        Check the HPT order file and dmesg

        :param session: the session to guest
        :raise: test.fail if required message is not found
        """
        hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order"
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        # Bumping the order triggers an in-guest HPT resize
        hpt_order += 1
        cmd = 'echo %d > %s' % (hpt_order, hpt_order_path)
        cmd_result = session.cmd_status_output(cmd)
        result = process.CmdResult(stderr=cmd_result[1],
                                   stdout=cmd_result[1],
                                   exit_status=cmd_result[0])
        libvirt.check_exit_status(result)
        dmesg = session.cmd('dmesg')
        dmesg_content = params.get('dmesg_content').split('|')
        for content in dmesg_content:
            if content % hpt_order not in dmesg:
                test.fail("'%s' is missing in dmesg" % (content % hpt_order))
            else:
                logging.info("'%s' is found in dmesg", content % hpt_order)

    def check_vm_network_accessed(session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param session: The session object to the host
        :raise: test.error when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec."
                      % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)

        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'"
                        % (command, option))

    def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"):
        """
        Add multiple devices

        :param dev_type: the type of the device to be added
        :param dev_index: the maximum index of the device to be added
        :param dev_model: the model of the device to be added
        """
        # NOTE(review): the 'vm_xml' parameter shadows the vm_xml module
        # imported at file level — confusing but harmless inside this helper.
        for inx in range(0, int(dev_index) + 1):
            newcontroller = Controller("controller")
            newcontroller.type = dev_type
            newcontroller.index = inx
            newcontroller.model = dev_model
            logging.debug("New device is added:\n%s", newcontroller)
            vm_xml.add_device(newcontroller)
        vm_xml.sync()

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters

        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line
        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
        virsh_args.update({"ignore_status": True})
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        if vm.is_alive():  # vm.connect_uri was updated
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()

        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        # Remote removal is best effort via a plain 'rm -f'
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                     'server_pwd': server_pwd}
        remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing the uni-direction migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)

                if vm.is_persistent():
                    vm.undefine()
        except Exception as detail:
            # Cleanup failures must not mask the test result
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        stress_args = params.get("stress_args", "--cpu 8 --io 4 "
                                 "--vm 2 --vm-bytes 128M "
                                 "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            # Expected when the session dies mid-migration; log only
            logging.debug(detail)

    def control_migrate_speed(to_speed=1):
        """
        Control migration duration

        :param to_speed: the speed value in Mbps to be set for migration
        :return int: the new migration speed after setting
        """
        virsh_args.update({"ignore_status": False})
        old_speed = virsh.migrate_getspeed(vm_name, **virsh_args)
        logging.debug("Current migration speed is %s MiB/s\n",
                      old_speed.stdout.strip())
        logging.debug("Set migration speed to %d MiB/s\n", to_speed)
        cmd_result = virsh.migrate_setspeed(vm_name, to_speed, "",
                                            **virsh_args)
        actual_speed = virsh.migrate_getspeed(vm_name, **virsh_args)
        logging.debug("New migration speed is %s MiB/s\n",
                      actual_speed.stdout.strip())
        return int(actual_speed.stdout.strip())

    def check_setspeed(params):
        """
        Set/get migration speed

        :param params: the parameters used
        :raise: test.fail if speed set does not take effect
        """
        # migrate_speed param is in bytes; convert to MiB/s for virsh
        expected_value = int(params.get("migrate_speed",
                                        '41943040')) // (1024 * 1024)
        actual_value = control_migrate_speed(to_speed=expected_value)
        params.update({'compare_to_value': actual_value})
        if actual_value != expected_value:
            test.fail("Migration speed is expected to be '%d MiB/s', but '%d MiB/s' "
                      "found" % (expected_value, actual_value))

    def check_domjobinfo(params, option=""):
        """
        Check given item in domjobinfo of the guest is as expected

        :param params: the parameters used
        :param option: options for domjobinfo
        :raise: test.fail if the value of given item is unexpected
        """
        def search_jobinfo(jobinfo):
            """
            Find value of given item in domjobinfo

            :param jobinfo: cmdResult object
            :raise: test.fail if not found
            """
            for item in jobinfo.stdout.splitlines():
                if item.count(jobinfo_item):
                    groups = re.findall(r'[0-9.]+', item.strip())
                    logging.debug("In '%s' search '%s'\n", item, groups[0])
                    if (math.fabs(float(groups[0]) - float(compare_to_value)) //
                            float(compare_to_value) > diff_rate):
                        test.fail("{} {} has too much difference from "
                                  "{}".format(jobinfo_item,
                                              groups[0],
                                              compare_to_value))
                    break

        jobinfo_item = params.get("jobinfo_item")
        compare_to_value = params.get("compare_to_value")
        logging.debug("compare_to_value:%s", compare_to_value)
        diff_rate = float(params.get("diff_rate", "0"))
        if not jobinfo_item or not compare_to_value:
            return
        vm_ref = '{}{}'.format(vm_name, option)
        jobinfo = virsh.domjobinfo(vm_ref, **virsh_args)
        search_jobinfo(jobinfo)

        check_domjobinfo_remote = params.get("check_domjobinfo_remote")
        if check_domjobinfo_remote:
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            jobinfo = remote_virsh_session.domjobinfo(vm_ref, **virsh_args)
            search_jobinfo(jobinfo)
            remote_virsh_session.close_session()

    def check_maxdowntime(params):
        """
        Set/get migration maxdowntime

        :param params: the parameters used
        :raise: test.fail if maxdowntime set does not take effect
        """
        expected_value = int(float(params.get("migrate_maxdowntime",
                                              '0.3')) * 1000)
        virsh_args.update({"ignore_status": False})
        old_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip())
        logging.debug("Current migration maxdowntime is %d ms", old_value)
        logging.debug("Set migration maxdowntime to %d ms", expected_value)
        virsh.migrate_setmaxdowntime(vm_name, expected_value, **virsh_args)
        actual_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip())
        logging.debug("New migration maxdowntime is %d ms", actual_value)
        if actual_value != expected_value:
            test.fail("Migration maxdowntime is expected to be '%d ms', but '%d ms' "
                      "found" % (expected_value, actual_value))
        params.update({'compare_to_value': actual_value})

    def do_actions_during_migrate(params):
        """
        The entry point to execute action list during migration

        :param params: the parameters used
        """
        actions_during_migration = params.get("actions_during_migration")
        if not actions_during_migration:
            return
        for action in actions_during_migration.split(","):
            if action == 'setspeed':
                check_setspeed(params)
            elif action == 'domjobinfo':
                check_domjobinfo(params)
            elif action == 'setmaxdowntime':
                check_maxdowntime(params)
            time.sleep(3)

    def attach_channel_xml():
        """
        Create channel xml and attach it to guest configuration
        """
        # Check if pty channel exists already
        for elem in new_xml.devices.by_device_tag('channel'):
            if elem.type_name == channel_type_name:
                logging.debug("{0} channel already exists in guest. "
                              "No need to add new one".format(channel_type_name))
                return

        params = {'channel_type_name': channel_type_name,
                  'target_type': target_type,
                  'target_name': target_name}
        channel_xml = libvirt.create_channel_xml(params)
        virsh.attach_device(domain_opt=vm_name, file_opt=channel_xml.xml,
                            flagstr="--config", ignore_status=False)
        logging.debug("New VMXML with channel:\n%s", virsh.dumpxml(vm_name))

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected as running.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(
            remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            # NOTE(review): test.fail is given logging-style extra args here;
            # the message is not %-formatted with timeout/vm_state — verify.
            test.fail("After timeout '%s' seconds, "
                      "the vm state on target host should "
                      "be 'running', but '%s' found", timeout, vm_state)
        remote_virsh_session.close_session()

    def get_usable_compress_cache(pagesize):
        """
        Get a number which is bigger than pagesize and is power of two.

        :param pagesize: the given integer
        :return: an integer satisfying the criteria
        """
        def calculate(num):
            # A power of two has a single set bit: num & (num - 1) == 0
            result = num & (num - 1)
            return (result == 0)

        item = pagesize
        found = False
        while (not found):
            item += 1
            found = calculate(item)
        logging.debug("%d is smallest one that is bigger than '%s' and "
                      "is power of 2", item, pagesize)
        return item

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if test is failed
        """
        logging.info("Migration out: %s", results_stdout_52lts(result).strip())
        logging.info("Migration error: %s", results_stderr_52lts(result).strip())

        if status_error:  # Migration should fail
            if err_msg:   # Special error messages are expected
                if not re.search(err_msg, results_stderr_52lts(result).strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'" % (err_msg,
                                               results_stderr_52lts(result).strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(results_stderr_52lts(result).strip())

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_opt = params.get("virsh_opt", "")
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    config_libvirtd = "yes" == params.get("config_libvirtd", "no")
    contrl_index = params.get("new_contrl_index", None)
    asynch_migration = "yes" == params.get("asynch_migrate", "no")
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    disable_verify_peer = "yes" == params.get("disable_verify_peer", "no")
    status_error = "yes" == params.get("status_error", "no")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    low_speed = params.get("low_speed", None)
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    hpt_resize = params.get("hpt_resize", None)
    htm_state = params.get("htm_state", None)

    # For pty channel test
    add_channel = "yes" == params.get("add_channel", "no")
    channel_type_name = params.get("channel_type_name", None)
    target_type = params.get("target_type", None)
    target_name = params.get("target_name", None)
    cmd_run_in_remote_guest = params.get("cmd_run_in_remote_guest", None)
    cmd_run_in_remote_guest_1 = params.get("cmd_run_in_remote_guest_1", None)
    cmd_run_in_remote_host = params.get("cmd_run_in_remote_host", None)
    cmd_run_in_remote_host_1 = params.get("cmd_run_in_remote_host_1", None)
    cmd_run_in_remote_host_2 = params.get("cmd_run_in_remote_host_2", None)

    # For qemu command line checking
    qemu_check = params.get("qemu_check", None)

    xml_check_after_mig = params.get("guest_xml_check_after_mig", None)

    # params for cache matrix test
    cache = params.get("cache")
    remove_cache = "yes" == params.get("remove_cache", "no")
    err_msg = params.get("err_msg")

    arch = platform.machine()
    # hpt/htm/controller-index cases only make sense on PPC hardware
    if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch:
        test.cancel("The case is PPC only.")

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Change the configuration files if needed before starting guest
        # For qemu.conf
        if extra.count("--tls"):
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
            if not disable_verify_peer:
                qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
                # Setup qemu configure
                logging.debug("Configure the qemu")
                cleanup_libvirtd_log(log_file)
                qemu_conf = libvirt.customize_libvirt_config(qemu_conf_dict,
                                                             config_type="qemu",
                                                             remote_host=True,
                                                             extra_params=params)
        # Setup libvirtd
        if config_libvirtd:
            logging.debug("Configure the libvirtd")
            cleanup_libvirtd_log(log_file)
            libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, remote_host=True, extra_params=params)
        # Prepare required guest xml before starting guest
        if contrl_index:
            new_xml.remove_all_device_by_type('controller')
            logging.debug("After removing controllers, current XML:\n%s\n",
                          new_xml)
            add_ctrls(new_xml, dev_index=contrl_index)

        if add_channel:
            attach_channel_xml()

        if hpt_resize:
            set_feature(new_xml, 'hpt', hpt_resize)

        if htm_state:
            set_feature(new_xml, 'htm', htm_state)

        if cache:
            params["driver_cache"] = cache
        if remove_cache:
            params["enable_cache"] = "no"

        # Change the disk of the vm to shared disk and then start VM
        libvirt.set_vm_disk(vm, params)
        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check qemu command line after guest is started
        if qemu_check:
            check_content = qemu_check
            if hpt_resize:
                check_content = "%s%s" % (qemu_check, hpt_resize)
            if htm_state:
                check_content = "%s%s" % (qemu_check, htm_state)
            libvirt.check_qemu_cmd_line(check_content)

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

        # Preparation for the running guest before migration
        if hpt_resize and hpt_resize != 'disabled':
            trigger_hpt_resize(vm_session)

        if low_speed:
            control_migrate_speed(int(low_speed))

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            stress_thread = threading.Thread(target=run_stress_in_vm,
                                             args=())
            stress_thread.start()

        # NOTE(review): func_name is only bound by the two branches below;
        # if neither matches and asynch_migration is set, the async call
        # would hit an unbound name — presumably it is defined earlier in
        # the file or guaranteed by the cfg matrix; verify.
        if extra.count("timeout-postcopy"):
            func_name = check_timeout_postcopy
        if params.get("actions_during_migration"):
            func_name = do_actions_during_migrate
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options:
            extra = "%s %s" % (extra, postcopy_options)

        # Execute migration process
        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            migration_test = libvirt.MigrationTest()
            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                            options, thread_timeout=900,
                                            ignore_status=True,
                                            virsh_opt=virsh_opt,
                                            func=func_name, extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        check_migration_res(mig_result)

        if add_channel:
            # Get the channel device source path of remote guest
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            file_path = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            remote_virsh_session.dumpxml(vm_name, to_file=file_path,
                                         debug=True,
                                         ignore_status=True)
            local_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            local_vmxml.xmltreefile = xml_utils.XMLTreeFile(file_path)
            for elem in local_vmxml.devices.by_device_tag('channel'):
                logging.debug("Found channel device {}".format(elem))
                if elem.type_name == channel_type_name:
                    host_source = elem.source.get('path')
                    logging.debug("Remote guest uses {} for channel device".format(host_source))
                    break
            remote_virsh_session.close_session()
            if not host_source:
                test.fail("Can not find source for %s channel on remote host" % channel_type_name)

            # Prepare to wait for message on remote host from the channel
            cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                         'server_pwd': server_pwd}
            cmd_result = remote.run_remote_cmd(
                cmd_run_in_remote_host % host_source,
                cmd_parms, runner_on_target)

            # Send message from remote guest to the channel file
            remote_vm_obj = utils_test.RemoteVMManager(cmd_parms)
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            remote_vm_obj.setup_ssh_auth(vm_ip, vm_pwd)
            cmd_result = remote_vm_obj.run_command(vm_ip, cmd_run_in_remote_guest_1)
            remote_vm_obj.run_command(
                vm_ip,
                cmd_run_in_remote_guest % results_stdout_52lts(cmd_result).strip())
            logging.debug("Sending message is done")

            # Check message on remote host from the channel
            remote.run_remote_cmd(cmd_run_in_remote_host_1, cmd_parms,
                                  runner_on_target)
            logging.debug("Receiving message is done")
            remote.run_remote_cmd(cmd_run_in_remote_host_2, cmd_parms,
                                  runner_on_target)

        if check_complete_job:
            opts = " --completed"
            check_virsh_command_and_option("domjobinfo", opts)
            if extra.count("comp-xbzrle-cache"):
                params.update({'compare_to_value': cache // 1024})
            check_domjobinfo(params, option=opts)

        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())
        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                         'server_pwd': server_pwd}
            remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            target_guest_dumpxml = results_stdout_52lts(
                remote_virsh_session.dumpxml(vm_name,
                                             debug=True,
                                             ignore_status=True)).strip()
            if hpt_resize:
                check_str = hpt_resize
            elif htm_state:
                check_str = htm_state
            if hpt_resize or htm_state:
                xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str)
                if not re.search(xml_check_after_mig, target_guest_dumpxml):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' in target guest XML:\n%s"
                              % (xml_check_after_mig, target_guest_dumpxml))

            if contrl_index:
                all_ctrls = re.findall(xml_check_after_mig, target_guest_dumpxml)
                if len(all_ctrls) != int(contrl_index) + 1:
                    remote_virsh_session.close_session()
                    test.fail("%s pci-root controllers are expected in guest XML, "
                              "but found %s" % (int(contrl_index) + 1,
                                                len(all_ctrls)))
            remote_virsh_session.close_session()

        if int(mig_result.exit_status) == 0:
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session)
            server_session.close()
    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s",
                          orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if extra.count("--tls") and not disable_verify_peer:
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            if config_libvirtd:
                logging.debug("Recover the libvirtd configuration")
                libvirt.customize_libvirt_config(None,
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=libvirtd_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                for obj in objs_list:
                    logging.debug("Clean up local objs")
                    # NOTE(review): relies on each obj's __del__ for recovery;
                    # 'del obj' only drops the loop reference — verify the
                    # objects are not referenced elsewhere.
                    del obj
        except Exception as exception_detail:
            if (not test_exception and not is_TestError and
                    not is_TestFail and not is_TestSkip):
                raise exception_detail
            else:
                # if any of above exceptions has been raised, only print
                # error log here to avoid of hiding the original issue
                logging.error(exception_detail)

    # Check result
    if is_TestFail:
        test.fail(test_exception)
    if is_TestSkip:
        test.cancel(test_exception)
    if is_TestError:
        test.error(test_exception)
    if not test_exception:
        logging.info("Case execution is done.")
    else:
        test.error(test_exception)
def run(test, params, env):
    """
    Run the test

    Migrate a guest (optionally twice) over a TLS-enabled connection,
    exercising migrate_tls_force/qemu.conf combinations on both hosts,
    then restore the configuration and guest XML.

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()
    bk_uri = vm.connect_uri

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)
    extra_args = migration_test.update_virsh_migrate_extra_args(params)

    extra = params.get("virsh_migrate_extra")
    postcopy_options = params.get("postcopy_options")
    if postcopy_options:
        extra = "%s %s" % (extra, postcopy_options)
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    options = params.get("virsh_migrate_options",
                         "--live --p2p --persistent --verbose")
    virsh_options = params.get("virsh_options", "")
    stress_package = params.get("stress_package")
    action_during_mig = params.get("action_during_mig")
    #action_params_map = params.get('action_params_map')
    migrate_speed = params.get("migrate_speed")
    migrate_again = "yes" == params.get("migrate_again", "no")
    vm_state_after_abort = params.get("vm_state_after_abort")
    return_port = params.get("return_port")

    if action_during_mig:
        action_during_mig = migration_base.parse_funcs(action_during_mig,
                                                       test, params)
    setup_tls = params.get("setup_tls", "no")
    if setup_tls == "yes":
        # migrate_tls_force is only available since libvirt 6.9.0
        if not libvirt_version.version_compare(6, 9, 0):
            test.cancel("Cannot support migrate_tls_force in this libvirt version.")

    # NOTE(review): qemu_conf_src is eval'ed to a dict while qemu_conf_dest
    # is kept as a raw string (consumed by update_remote_file) — confirm
    # this asymmetry is intentional.
    qemu_conf_src = eval(params.get("qemu_conf_src", "{}"))
    qemu_conf_dest = params.get("qemu_conf_dest", "{}")
    status_error = "yes" == params.get("status_error", "no")
    migrate_tls_force_default = "yes" == params.get("migrate_tls_force_default", "no")
    server_ip = params.get("server_ip")
    server_user = params.get("server_user")
    server_pwd = params.get("server_pwd")
    server_params = {'server_ip': server_ip,
                     'server_user': server_user,
                     'server_pwd': server_pwd}
    tls_obj = None
    qemu_conf_local = None
    qemu_conf_remote = None

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        libvirt.set_vm_disk(vm, params)
        # Setup libvirtd remote connection TLS connection env
        if setup_tls == "yes":
            tls_obj = TLSConnection(params)
            tls_obj.auto_recover = True
            tls_obj.conn_setup()

        # Setup default value for migrate_tls_force
        if migrate_tls_force_default:
            value_list = ["migrate_tls_force"]
            # Setup migrate_tls_force default value on remote
            server_params['file_path'] = "/etc/libvirt/qemu.conf"
            libvirt_config.remove_key_in_conf(value_list, "qemu",
                                              remote_params=server_params)
            # Setup migrate_tls_force default value on local
            libvirt_config.remove_key_in_conf(value_list, "qemu")

        # Update remote qemu conf
        if qemu_conf_dest:
            qemu_conf_remote = libvirt_remote.update_remote_file(
                server_params, qemu_conf_dest, "/etc/libvirt/qemu.conf")

        # Update local qemu conf
        if qemu_conf_src:
            qemu_conf_local = libvirt.customize_libvirt_config(qemu_conf_src,
                                                               "qemu")

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        vm.wait_for_login().close()

        if stress_package:
            migration_test.run_stress_in_vm(vm, params)
        if migrate_speed:
            mode = 'both' if '--postcopy' in postcopy_options else 'precopy'
            migration_test.control_migrate_speed(vm_name,
                                                 int(migrate_speed),
                                                 mode)

        # Execute migration process
        migration_base.do_migration(vm, migration_test, None, dest_uri,
                                    options, virsh_options, extra,
                                    action_during_mig,
                                    extra_args)

        func_returns = dict(migration_test.func_ret)
        migration_test.func_ret.clear()
        logging.debug("Migration returns function results:%s", func_returns)
        if return_port:
            port_used = get_used_port(func_returns)
        if vm_state_after_abort:
            check_vm_state_after_abort(vm_name, vm_state_after_abort,
                                       bk_uri, dest_uri, test)

        if migrate_again:
            action_during_mig = migration_base.parse_funcs(
                params.get('action_during_mig_again'), test, params)
            extra_args['status_error'] = params.get("migrate_again_status_error",
                                                    "no")
            if params.get("virsh_migrate_extra_mig_again"):
                extra = params.get("virsh_migrate_extra_mig_again")
            migration_base.do_migration(vm, migration_test, None, dest_uri,
                                        options, virsh_options,
                                        extra, action_during_mig,
                                        extra_args)
            if return_port:
                func_returns = dict(migration_test.func_ret)
                logging.debug("Migration returns function "
                              "results:%s", func_returns)
                port_second = get_used_port(func_returns)
                # The retried migration must reuse the port of the first one
                if port_used != port_second:
                    test.fail("Expect same port '{}' is used as previous one, "
                              "but found new one '{}'".format(port_used,
                                                              port_second))
                else:
                    logging.debug("Same port '%s' was used as "
                                  "expected", port_second)
        migration_test.post_migration_check([vm], params, uri=dest_uri)
    finally:
        logging.info("Recover test environment")
        vm.connect_uri = bk_uri
        # Clean VM on destination and source
        migration_test.cleanup_vm(vm, dest_uri)
        # Restore local qemu conf and restart libvirtd
        if qemu_conf_local:
            logging.debug("Recover local qemu configurations")
            libvirt.customize_libvirt_config(None,
                                             config_type="qemu",
                                             is_recover=True,
                                             config_object=qemu_conf_local)
        # Restore remote qemu conf and restart libvirtd
        if qemu_conf_remote:
            logging.debug("Recover remote qemu configurations")
            # Dropping the reference triggers the object's recovery logic
            del qemu_conf_remote
        # Clean up TLS test env:
        # NOTE(review): setup_tls is the raw string ("yes"/"no"), so it is
        # always truthy here; the 'tls_obj' check alone gates this branch
        # in practice since tls_obj stays None when TLS was not set up.
        if setup_tls and tls_obj:
            logging.debug("Clean up TLS object")
            del tls_obj
        orig_config_xml.sync()
def run(test, params, env):
    """
    Test command: virt-admin server-update-tls.

    1) when tls related files changed, notify server to update
       TLS related files online, without restart daemon

    Flow: set up a TLS connection whose server cert carries a deliberately
    wrong (fake) IP so connecting fails, then regenerate the server cert
    with the correct IP, run 'virt-admin server-update-tls', and verify a
    TLS connection now succeeds — all without restarting the daemon.

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment
    """
    def add_remote_firewall_port(port, params):
        """
        Add the port on remote host

        :param port: port to add
        :param params: Dictionary with the test parameters
        """
        server_ip = params.get("server_ip")
        server_user = params.get("server_user")
        server_pwd = params.get("server_pwd")
        remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        firewall_cmd = utils_iptables.Firewall_cmd(remote_session)
        firewall_cmd.add_port(port, 'tcp', permanent=True)
        remote_session.close()

    def remove_remote_firewall_port(port, params):
        """
        Remove the port on remote host

        :param port: port to remove
        :param params: Dictionary with the test parameters
        """
        server_ip = params.get("server_ip")
        server_user = params.get("server_user")
        server_pwd = params.get("server_pwd")
        remote_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        firewall_cmd = utils_iptables.Firewall_cmd(remote_session)
        firewall_cmd.remove_port(port, 'tcp', permanent=True)
        remote_session.close()

    def update_server_pem(cert_saved_dir, remote_libvirt_pki_dir):
        """
        Update the server info and re-build servercert

        :param cert_saved_dir: The directory where cert files are saved
        :param remote_libvirt_pki_dir: Directory to store pki on remote
        """
        logging.debug("Update serverinfo")
        # Rewrite server.info into server2.info, replacing the fake IP
        # with the real server IP so the regenerated cert is valid.
        with open(os.path.join(cert_saved_dir, "server.info"), "r") as f1:
            lines = f1.readlines()
        with open(os.path.join(cert_saved_dir, "server2.info"), "w") as f2:
            for line in lines:
                if fake_ip in line:
                    line = line.replace(fake_ip, server_ip)
                f2.write(line)

        cmd = ("certtool --generate-certificate --load-privkey "
               "{0}/serverkey.pem --load-ca-certificate {0}/cacert.pem "
               "--load-ca-privkey {0}/cakey.pem --template {0}/server2.info "
               "--outfile {0}/servercert.pem".format(cert_saved_dir))
        servercert_pem = os.path.join(cert_saved_dir, "servercert.pem")
        process.run(cmd, shell=True, verbose=True)
        remote.copy_files_to(server_ip, 'scp', server_user, server_pwd,
                             '22', servercert_pem, remote_libvirt_pki_dir)

    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    tls_port = params.get("tls_port", "16514")
    uri = "qemu+tls://%s:%s/system" % (server_ip, tls_port)

    remote_virt_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                         'remote_pwd': server_pwd, 'unprivileged_user': None,
                         'ssh_remote_auth': True}
    tls_obj = None

    if not libvirt_version.version_compare(6, 2, 0):
        test.cancel("This libvirt version doesn't support "
                    "virt-admin server-update-tls.")
    try:
        vp = virt_admin.VirtadminPersistent(**remote_virt_dargs)

        add_remote_firewall_port(tls_port, params)
        # Generate a fake ip for testing by bumping the last octet.
        # Fix: parenthesize before '%' — the original 'x + 1 % 255' was
        # just 'x + 1' ('%' binds tighter than '+'), producing an invalid
        # octet such as 256 when the real last octet is 255.
        repl = str((int(server_ip.strip().split('.')[-1]) + 1) % 255)
        fake_ip = re.sub("([0-9]+)$", repl, server_ip)
        params.update({"server_info_ip": fake_ip})
        tls_obj = TLSConnection(params)
        tls_obj.conn_setup()
        tls_obj.auto_recover = True

        # Connection should fail because TLS is set incorrectly
        ret, output = libvirt.connect_libvirtd(uri)
        if ret:
            test.fail(
                "Connection should fail but succeed. ret: {}, output: {}".
                format(ret, output))
        if "authentication failed" not in output:
            # Fix: corrected 'Unablee' typo in the failure message
            test.fail(
                "Unable to find the expected error message. output: %s"
                % output)

        tmp_dir = tls_obj.tmp_dir
        remote_libvirt_pki_dir = tls_obj.libvirt_pki_dir
        update_server_pem(tmp_dir, remote_libvirt_pki_dir)

        serv_name = virt_admin.check_server_name()
        logging.debug("service name: %s", serv_name)
        result = vp.server_update_tls(serv_name, debug=True)
        libvirt.check_exit_status(result)

        # Re-connect to the server: should succeed with the fixed cert
        ret, output = libvirt.connect_libvirtd(uri)
        if not ret:
            test.fail("Connection fails, ret: {}, output: {}".format(
                ret, output))
    finally:
        logging.info("Recover test environment")
        # TLS artifacts are restored by tls_obj.auto_recover on GC;
        # only the firewall rule needs explicit removal here.
        remove_remote_firewall_port(tls_port, params)
def run(test, params, env):
    """
    Test remote access with TCP, TLS connection

    Builds a libvirt connection URI from the test parameters (driver,
    transport, optional port / pkipath / no_verify / socket extras),
    prepares whichever transport objects the scenario needs (SSH, TLS,
    TCP, UNIX, SASL, IPv6), then drives ``remote_access`` and finally
    tears everything down again.

    :param test: avocado test object (used for logfile, cancel, and fail)
    :param params: test parameters dictionary
    :param env: test environment object (used to look up and destroy the VM)
    """
    test_dict = dict(params)
    vm_name = test_dict.get("main_vm")
    status_error = test_dict.get("status_error", "no")
    allowed_dn_str = params.get("tls_allowed_dn_list")
    if allowed_dn_str:
        allowed_dn_list = []
        if not libvirt_version.version_compare(1, 0, 0):
            # Reverse the order in the dn list to workaround the
            # feature changes between RHEL 6 and RHEL 7
            dn_list = allowed_dn_str.split(",")
            dn_list.reverse()
            allowed_dn_str = ','.join(dn_list)
        allowed_dn_list.append(allowed_dn_str)
        test_dict['tls_allowed_dn_list'] = allowed_dn_list
    transport = test_dict.get("transport")
    plus = test_dict.get("conn_plus", "+")
    config_ipv6 = test_dict.get("config_ipv6", "no")
    tls_port = test_dict.get("tls_port", "")
    listen_addr = test_dict.get("listen_addr", "0.0.0.0")
    ssh_port = test_dict.get("ssh_port", "")
    tcp_port = test_dict.get("tcp_port", "")
    server_ip = test_dict.get("server_ip")
    server_user = test_dict.get("server_user")
    server_pwd = test_dict.get("server_pwd")
    no_any_config = params.get("no_any_config", "no")
    sasl_type = test_dict.get("sasl_type", "gssapi")
    sasl_user_pwd = test_dict.get("sasl_user_pwd")
    sasl_allowed_users = test_dict.get("sasl_allowed_users")
    server_cn = test_dict.get("server_cn")
    custom_pki_path = test_dict.get("custom_pki_path")
    rm_client_key_cmd = test_dict.get("remove_client_key_cmd")
    rm_client_cert_cmd = test_dict.get("remove_client_cert_cmd")
    ca_cn_new = test_dict.get("ca_cn_new")
    no_verify = test_dict.get("no_verify", "no")
    ipv6_addr_des = test_dict.get("ipv6_addr_des")
    tls_sanity_cert = test_dict.get("tls_sanity_cert")
    restart_libvirtd = test_dict.get("restart_libvirtd", "yes")
    diff_virt_ver = test_dict.get("diff_virt_ver", "no")
    driver = test_dict.get("test_driver", "qemu")
    uri_path = test_dict.get("uri_path", "/system")
    virsh_cmd = params.get("virsh_cmd", "list")
    action = test_dict.get("libvirtd_action", "restart")
    uri_user = test_dict.get("uri_user", "")
    unix_sock_dir = test_dict.get("unix_sock_dir")
    mkdir_cmd = test_dict.get("mkdir_cmd")
    rmdir_cmd = test_dict.get("rmdir_cmd")
    adduser_cmd = test_dict.get("adduser_cmd")
    deluser_cmd = test_dict.get("deluser_cmd")
    auth_conf = test_dict.get("auth_conf")
    auth_conf_cxt = test_dict.get("auth_conf_cxt")
    polkit_pkla = test_dict.get("polkit_pkla")
    polkit_pkla_cxt = test_dict.get("polkit_pkla_cxt")
    ssh_setup = test_dict.get("ssh_setup", "no")
    tcp_setup = test_dict.get("tcp_setup", "no")
    tls_setup = test_dict.get("tls_setup", "no")
    unix_setup = test_dict.get("unix_setup", "no")
    ssh_recovery = test_dict.get("ssh_auto_recovery", "yes")
    tcp_recovery = test_dict.get("tcp_auto_recovery", "yes")
    tls_recovery = test_dict.get("tls_auto_recovery", "yes")
    unix_recovery = test_dict.get("unix_auto_recovery", "yes")

    # URI port component, filled in below as ":<port>" when one applies
    port = ""
    # extra URI arguments
    extra_params = ""
    # it's used to clean up SSH, TLS, TCP, UNIX and SASL objs later
    objs_list = []
    # redirect LIBVIRT_DEBUG log into test log later
    test_dict["logfile"] = test.logfile

    # Make sure all of parameters are assigned a valid value
    check_parameters(test_dict, test)

    # only simply connect libvirt daemon then return
    if no_any_config == "yes":
        test_dict["uri"] = "%s%s%s://%s" % (driver, plus, transport, uri_path)
        remote_access(test_dict, test)
        return

    # NOTE(review): the three branches below each overwrite extra_params,
    # so the scenarios are assumed to be mutually exclusive — confirm in
    # the cfg files.
    # append extra 'pkipath' argument to URI if exists
    if custom_pki_path:
        extra_params = "?pkipath=%s" % custom_pki_path

    # append extra 'no_verify' argument to URI if exists
    if no_verify == "yes":
        extra_params = "?no_verify=1"

    # append extra 'socket' argument to URI if exists
    if unix_sock_dir:
        extra_params = "?socket=%s/libvirt-sock" % unix_sock_dir

    # generate auth.conf and default under the '/etc/libvirt'
    if auth_conf_cxt and auth_conf:
        cmd = "echo -e '%s' > %s" % (auth_conf_cxt, auth_conf)
        process.system(cmd, ignore_status=True, shell=True)

    # generate polkit_pkla and default under the
    # '/etc/polkit-1/localauthority/50-local.d/'
    if polkit_pkla_cxt and polkit_pkla:
        cmd = "echo -e '%s' > %s" % (polkit_pkla_cxt, polkit_pkla)
        process.system(cmd, ignore_status=True, shell=True)

    # generate remote IP: IPv6 literal (bracketed), server CN, or plain IP
    if config_ipv6 == "yes" and ipv6_addr_des:
        remote_ip = "[%s]" % ipv6_addr_des
    elif config_ipv6 != "yes" and server_cn:
        remote_ip = server_cn
    elif config_ipv6 != "yes" and ipv6_addr_des:
        remote_ip = "[%s]" % ipv6_addr_des
    elif server_ip and transport != "unix":
        remote_ip = server_ip
    else:
        remote_ip = ""

    # get URI port (later assignments win: tcp < tls < ssh)
    if tcp_port != "":
        port = ":" + tcp_port

    if tls_port != "":
        port = ":" + tls_port

    if ssh_port != "" and not ipv6_addr_des:
        port = ":" + ssh_port

    # generate URI, e.g. qemu+tls://host:16514/system?pkipath=...
    uri = "%s%s%s://%s%s%s%s%s" % (driver, plus, transport, uri_user,
                                   remote_ip, port, uri_path, extra_params)
    test_dict["uri"] = uri

    logging.debug("The final test dict:\n<%s>", test_dict)

    # 'virsh start' scenarios need the domain to exist remotely; cancel early
    # if it does not
    if virsh_cmd == "start" and transport != "unix":
        session = remote.wait_for_login("ssh", server_ip, "22",
                                        "root", server_pwd, "#")
        cmd = "virsh domstate %s" % vm_name
        status, output = session.cmd_status_output(cmd)
        if status:
            session.close()
            test.cancel(output)
        session.close()

    try:
        # setup IPv6
        if config_ipv6 == "yes":
            ipv6_obj = IPv6Manager(test_dict)
            objs_list.append(ipv6_obj)
            ipv6_obj.setup()

        # compare libvirt version if needs
        if diff_virt_ver == "yes":
            compare_virt_version(server_ip, server_user, server_pwd, test)

        # setup SSH
        if transport == "ssh" or ssh_setup == "yes":
            if not test_dict.get("auth_pwd"):
                ssh_obj = SSHConnection(test_dict)
                if ssh_recovery == "yes":
                    objs_list.append(ssh_obj)
                # setup test environment
                ssh_obj.conn_setup()

        # setup TLS
        if transport == "tls" or tls_setup == "yes":
            tls_obj = TLSConnection(test_dict)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
            # reserve cert path
            tmp_dir = tls_obj.tmp_dir
            # setup test environment
            if tls_sanity_cert == "no":
                # only setup CA and client
                tls_obj.conn_setup(False, True)
            else:
                # setup CA, server and client
                tls_obj.conn_setup()

        # setup TCP
        if transport == "tcp" or tcp_setup == "yes":
            tcp_obj = TCPConnection(test_dict)
            if tcp_recovery == "yes":
                objs_list.append(tcp_obj)
            # setup test environment
            tcp_obj.conn_setup()

        # create a directory if needs
        if mkdir_cmd:
            process.system(mkdir_cmd, ignore_status=True, shell=True)

        # setup UNIX
        if transport == "unix" or unix_setup == "yes":
            unix_obj = UNIXConnection(test_dict)
            if unix_recovery == "yes":
                objs_list.append(unix_obj)
            # setup test environment
            unix_obj.conn_setup()

        # need to restart libvirt service for negative testing
        if restart_libvirtd == "no":
            remotely_control_libvirtd(server_ip, server_user,
                                      server_pwd, action, status_error)
        # check TCP/IP listening by service
        if restart_libvirtd != "no" and transport != "unix":
            service = 'libvirtd'
            if transport == "ssh":
                service = 'ssh'
            check_listening_port_remote_by_service(server_ip, server_user,
                                                   server_pwd, service,
                                                   port, listen_addr)

        # remove client certifications if exist, only for TLS negative testing
        if rm_client_key_cmd:
            process.system(rm_client_key_cmd, ignore_status=True, shell=True)

        if rm_client_cert_cmd:
            process.system(rm_client_cert_cmd, ignore_status=True, shell=True)

        # add user to specific group
        if adduser_cmd:
            process.system(adduser_cmd, ignore_status=True, shell=True)

        # change /etc/pki/libvirt/servercert.pem then
        # restart libvirt service on the remote host
        if tls_sanity_cert == "no" and ca_cn_new:
            test_dict['ca_cn'] = ca_cn_new
            test_dict['ca_cakey_path'] = tmp_dir
            test_dict['scp_new_cacert'] = 'no'
            tls_obj_new = TLSConnection(test_dict)
            test_dict['tls_obj_new'] = tls_obj_new
            # only setup new CA and server
            tls_obj_new.conn_setup(True, False)

        # setup SASL certification
        # From libvirt-3.2.0, the default sasl change from
        # DIGEST-MD5 to GSSAPI. "sasl_user" is discarded.
        # More details: https://libvirt.org/auth.html#ACL_server_kerberos
        if sasl_user_pwd and sasl_type == 'digest-md5':
            # covert string tuple and list to python data type
            # NOTE(review): eval() on cfg-provided strings — trusted input
            # only; consider ast.literal_eval
            sasl_user_pwd = eval(sasl_user_pwd)
            if sasl_allowed_users:
                sasl_allowed_users = eval(sasl_allowed_users)

            # create a sasl user
            sasl_obj = SASL(test_dict)
            objs_list.append(sasl_obj)
            sasl_obj.setup()

            for sasl_user, sasl_pwd in sasl_user_pwd:
                # need't authentication if the auth.conf is configured by user
                if not auth_conf:
                    test_dict["auth_user"] = sasl_user
                    test_dict["auth_pwd"] = sasl_pwd
                    logging.debug("sasl_user, sasl_pwd = "
                                  "(%s, %s)", sasl_user, sasl_pwd)

                # a user outside the allow list is expected to be rejected
                if sasl_allowed_users and sasl_user not in sasl_allowed_users:
                    test_dict["status_error"] = "yes"
                patterns_extra_dict = {"authentication name": sasl_user}
                test_dict["patterns_extra_dict"] = patterns_extra_dict
                remote_access(test_dict, test)
        else:
            remote_access(test_dict, test)

    finally:
        # recovery test environment
        # Destroy the VM after all test are done
        cleanup(objs_list)

        if vm_name:
            vm = env.get_vm(vm_name)
            if vm and vm.is_alive():
                vm.destroy(gracefully=False)

        if rmdir_cmd:
            process.system(rmdir_cmd, ignore_status=True, shell=True)

        if deluser_cmd:
            process.system(deluser_cmd, ignore_status=True, shell=True)

        if auth_conf and os.path.isfile(auth_conf):
            os.unlink(auth_conf)

        if polkit_pkla and os.path.isfile(polkit_pkla):
            os.unlink(polkit_pkla)
def run(test, params, env):
    """
    Test for basic serial character device function.

    1) Define the VM with specified serial device and define result meets
       expectation.
    2) Test whether defined XML meets expectation
    3) Start the guest and check if start result meets expectation
    4) Test the function of started serial device
    5) Shutdown the VM and check whether cleaned up properly
    6) Clean up environment

    :param test: avocado test object
    :param params: test parameters dictionary
    :param env: test environment object
    """
    def set_targets(serial):
        """
        Set the target model/type on a serial device XML according to
        the host architecture (ppc, aarch, or default).
        """
        machine = platform.machine()
        if "ppc" in machine:
            serial.target_model = 'spapr-vty'
            serial.target_type = 'spapr-vio-serial'
        elif "aarch" in machine:
            serial.target_model = 'pl011'
            serial.target_type = 'system-serial'
        else:
            serial.target_model = target_type
            serial.target_type = target_type

    def prepare_spice_graphics_device():
        """
        Prepare a spice graphics device XML according to parameters
        """
        graphic = Graphics(type_name='spice')
        graphic.autoport = "yes"
        graphic.port = "-1"
        graphic.tlsPort = "-1"
        return graphic

    def prepare_serial_device():
        """
        Prepare a serial device XML according to parameters
        """
        local_serial_type = serial_type
        if serial_type == "tls":
            # A TLS chardev is a TCP chardev in the domain XML
            local_serial_type = "tcp"
        serial = librarian.get('serial')(local_serial_type)

        serial.target_port = "0"

        set_targets(serial)

        # sources_str is "key:val,key:val key:val,..." — one space-separated
        # entry per <source> element
        sources = []
        logging.debug(sources_str)
        for source_str in sources_str.split():
            source_dict = {}
            for att in source_str.split(','):
                key, val = att.split(':')
                source_dict[key] = val
            sources.append(source_dict)
        serial.sources = sources
        return serial

    def prepare_console_device():
        """
        Prepare a console device XML according to parameters
        """
        local_serial_type = serial_type
        if serial_type == "tls":
            local_serial_type = "tcp"
        console = librarian.get('console')(local_serial_type)
        console.target_type = console_target_type
        console.target_port = console_target_port

        sources = []
        logging.debug(sources_str)
        for source_str in sources_str.split():
            source_dict = {}
            for att in source_str.split(','):
                key, val = att.split(':')
                source_dict[key] = val
            sources.append(source_dict)
        console.sources = sources
        return console

    def define_and_check():
        """
        Predict the error message when defining and try to define the guest
        with testing serial device.

        :return: True when the define succeeded, False otherwise
        """
        fail_patts = []
        if serial_type in ['dev', 'file', 'pipe', 'unix'] and not any(
                ['path' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source path attribute for char device")
        if serial_type in ['tcp'] and not any(
                ['host' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source host attribute for char device")
        if serial_type in ['tcp', 'udp'] and not any(
                ['service' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source service attribute for char "
                              "device")
        if serial_type in ['spiceport'] and not any(
                ['channel' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing source channel attribute for char "
                              "device")
        if serial_type in ['nmdm'] and not any(
                ['master' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing master path attribute for nmdm device")
        if serial_type in ['nmdm'] and not any(
                ['slave' in s for s in serial_dev.sources]):
            fail_patts.append(r"Missing slave path attribute for nmdm device")
        if serial_type in ['spicevmc']:
            fail_patts.append(r"spicevmc device type only supports virtio")
        vm_xml.undefine()
        res = vm_xml.virsh.define(vm_xml.xml)
        libvirt.check_result(res, expected_fails=fail_patts)
        return not res.exit_status

    def check_xml():
        """
        Predict the result serial device and generated console device
        and check the result domain XML against expectation
        """
        console_cls = librarian.get('console')

        local_serial_type = serial_type
        if serial_type == 'tls':
            local_serial_type = 'tcp'
        # Predict expected serial and console XML
        expected_console = console_cls(local_serial_type)

        if local_serial_type == 'udp':
            # libvirt defaults a udp source with a service to mode=connect
            sources = []
            for source in serial_dev.sources:
                if 'service' in source and 'mode' not in source:
                    source['mode'] = 'connect'
                sources.append(source)
        else:
            sources = serial_dev.sources
        expected_console.sources = sources

        if local_serial_type == 'tcp':
            # NOTE(review): 'protocol_type' in local_serial_type tests a
            # substring of 'tcp' and is always False, so the expected
            # protocol is always "raw". Probably meant
            # `'protocol_type' in serial_dev` — confirm before changing.
            if 'protocol_type' in local_serial_type:
                expected_console.protocol_type = serial_dev.protocol_type
            else:
                expected_console.protocol_type = "raw"
        expected_console.target_port = serial_dev.target_port
        if 'target_type' in serial_dev:
            expected_console.target_type = serial_dev.target_type
        expected_console.target_type = console_target_type
        logging.debug("Expected console XML is:\n%s", expected_console)

        # Get current serial and console XML
        current_xml = VMXML.new_from_dumpxml(vm_name)
        serial_elem = current_xml.xmltreefile.find('devices/serial')
        console_elem = current_xml.xmltreefile.find('devices/console')
        if console_elem is None:
            test.fail("Expect generate console automatically, "
                      "but found none.")
        if serial_elem and console_target_type != 'serial':
            test.fail("Don't Expect exist serial device, "
                      "but found:\n%s" % serial_elem)

        cur_console = console_cls.new_from_element(console_elem)
        logging.debug("Current console XML is:\n%s", cur_console)
        # Compare current serial and console with oracle.
        if not expected_console == cur_console:
            # "==" has been override
            test.fail("Expect generate console:\n%s\nBut got:\n%s" %
                      (expected_console, cur_console))

        if console_target_type == 'serial':
            serial_cls = librarian.get('serial')
            expected_serial = serial_cls(local_serial_type)
            expected_serial.sources = sources
            set_targets(expected_serial)

            if local_serial_type == 'tcp':
                # NOTE(review): same always-False substring test as above.
                if 'protocol_type' in local_serial_type:
                    expected_serial.protocol_type = serial_dev.protocol_type
                else:
                    expected_serial.protocol_type = "raw"
            expected_serial.target_port = serial_dev.target_port
            if serial_elem is None:
                test.fail("Expect exist serial device, "
                          "but found none.")
            cur_serial = serial_cls.new_from_element(serial_elem)
            if target_type == 'pci-serial':
                # pci-serial devices must get a PCI address assigned
                if cur_serial.address is None:
                    test.fail("Expect serial device address is not assigned")
                else:
                    logging.debug("Serial address is: %s", cur_serial.address)
            logging.debug("Expected serial XML is:\n%s", expected_serial)
            logging.debug("Current serial XML is:\n%s", cur_serial)
            # Compare current serial and console with oracle.
            if target_type != 'pci-serial' and not expected_serial == cur_serial:
                # "==" has been override
                test.fail("Expect serial device:\n%s\nBut got:\n "
                          "%s" % (expected_serial, cur_serial))

    def prepare_serial_console():
        """
        Prepare serial console to connect with guest serial according to
        the serial type

        :return: a Console object, or None for unsupported serial types
        """
        is_server = console_type == 'server'
        if serial_type == 'unix':
            # Unix socket path should match SELinux label
            socket_path = '/var/lib/libvirt/qemu/virt-test'
            console = Console('unix', socket_path, is_server)
        elif serial_type == 'tls':
            host = '127.0.0.1'
            service = 5556
            console = Console('tls', (host, service), is_server,
                              custom_pki_path)
        elif serial_type == 'tcp':
            host = '127.0.0.1'
            service = 2445
            console = Console('tcp', (host, service), is_server)
        elif serial_type == 'udp':
            host = '127.0.0.1'
            service = 2445
            console = Console('udp', (host, service), is_server)
        elif serial_type == 'file':
            socket_path = '/var/log/libvirt/virt-test'
            console = Console('file', socket_path, is_server)
        elif serial_type == 'pipe':
            socket_path = '/tmp/virt-test'
            console = Console('pipe', socket_path, is_server)
        else:
            logging.debug("Serial type %s don't support console test yet.",
                          serial_type)
            console = None
        return console

    def check_qemu_cmdline():
        """
        Check if VM's qemu command line meets expectation.
        """
        # Use a context manager so the /proc file handle is not leaked
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
        logging.debug('Qemu command line: %s', cmdline)
        options = cmdline.split('\x00')
        ser_opt = None
        con_opt = None
        ser_dev = None
        con_dev = None
        exp_ser_opt = None
        exp_ser_dev = None
        exp_con_opt = None
        exp_con_dev = None
        # Get serial and console options from qemu command line
        for idx, opt in enumerate(options):
            if opt == '-chardev':
                if 'id=charserial' in options[idx + 1]:
                    ser_opt = options[idx + 1]
                    ser_dev = options[idx + 3]
                    logging.debug('Serial option is: %s', ser_opt)
                    logging.debug('Serial device option is: %s', ser_dev)
                if 'id=charconsole' in options[idx + 1]:
                    con_opt = options[idx + 1]
                    con_dev = options[idx + 3]
                    logging.debug('Console option is: %s', con_opt)
                    logging.debug('Console device option is: %s', con_dev)

        # Get expected serial and console options from params
        if serial_type == 'dev':
            ser_type = 'tty'
        elif serial_type in ['unix', 'tcp', 'tls']:
            ser_type = 'socket'
        else:
            ser_type = serial_type
        exp_ser_opts = [ser_type, 'id=charserial0']
        if serial_type in ['dev', 'file', 'pipe', 'unix']:
            for source in serial_dev.sources:
                if 'path' in source:
                    path = source['path']
            if serial_type == 'file':
                # Use re to make this fdset number flexible
                exp_ser_opts.append(r'path=/dev/fdset/\d+')
                exp_ser_opts.append('append=on')
            elif serial_type == 'unix':
                # Use re to make this fd number flexible
                exp_ser_opts.append(r'fd=\d+')
            else:
                exp_ser_opts.append('path=%s' % path)
        elif serial_type in ['tcp', 'tls']:
            for source in serial_dev.sources:
                if 'host' in source:
                    host = source['host']
                if 'service' in source:
                    port = source['service']
            exp_ser_opts.append('host=%s' % host)
            exp_ser_opts.append('port=%s' % port)
        elif serial_type in ['udp']:
            localaddr = ''
            localport = '0'
            for source in serial_dev.sources:
                if source['mode'] == 'connect':
                    if 'host' in source:
                        host = source['host']
                    if 'service' in source:
                        port = source['service']
                else:
                    if 'host' in source:
                        localaddr = source['host']
                    if 'service' in source:
                        localport = source['service']
            exp_ser_opts.append('host=%s' % host)
            exp_ser_opts.append('port=%s' % port)
            exp_ser_opts.append('localaddr=%s' % localaddr)
            exp_ser_opts.append('localport=%s' % localport)
        if serial_type in ['unix', 'tcp', 'udp', 'tls']:
            for source in serial_dev.sources:
                if 'mode' in source:
                    mode = source['mode']
            if mode == 'bind':
                exp_ser_opts.append('server')
                exp_ser_opts.append('nowait')
        if serial_type == 'tls':
            exp_ser_opts.append('tls-creds=objcharserial0_tls0')
        exp_ser_opt = ','.join(exp_ser_opts)

        if "ppc" in platform.machine():
            exp_ser_devs = ['spapr-vty', 'chardev=charserial0',
                            'reg=0x30000000']
            if libvirt_version.version_compare(3, 9, 0):
                exp_ser_devs.insert(2, 'id=serial0')
        else:
            logging.debug('target_type: %s', target_type)
            if target_type == 'pci-serial':
                exp_ser_devs = ['pci-serial', 'chardev=charserial0',
                                'id=serial0', r'bus=pci.\d+', r'addr=0x\d+']
            else:
                exp_ser_devs = ['isa-serial', 'chardev=charserial0',
                                'id=serial0']
        exp_ser_dev = ','.join(exp_ser_devs)

        if console_target_type != 'serial' or serial_type == 'spicevmc':
            exp_ser_opt = None
            exp_ser_dev = None
        logging.debug("exp_ser_opt: %s", exp_ser_opt)
        logging.debug("ser_opt: %s", ser_opt)

        # Check options against expectation
        if exp_ser_opt is not None and re.match(exp_ser_opt, ser_opt) is None:
            test.fail('Expect get qemu command serial option "%s", '
                      'but got "%s"' % (exp_ser_opt, ser_opt))
        if exp_ser_dev is not None and ser_dev is not None \
                and re.match(exp_ser_dev, ser_dev) is None:
            test.fail(
                'Expect get qemu command serial device option "%s", '
                'but got "%s"' % (exp_ser_dev, ser_dev))

        if console_target_type == 'virtio':
            exp_con_opts = [serial_type, 'id=charconsole1']
            exp_con_opt = ','.join(exp_con_opts)

            exp_con_devs = []
            if console_target_type == 'virtio':
                exp_con_devs.append('virtconsole')
            exp_con_devs += ['chardev=charconsole1', 'id=console1']
            exp_con_dev = ','.join(exp_con_devs)
            if con_opt != exp_con_opt:
                test.fail(
                    'Expect get qemu command console option "%s", '
                    'but got "%s"' % (exp_con_opt, con_opt))
            if con_dev != exp_con_dev:
                test.fail(
                    'Expect get qemu command console device option "%s", '
                    'but got "%s"' % (exp_con_dev, con_dev))

    def check_serial_console(console, username, password):
        """
        Check whether console works properly by read the result for read only
        console and login and emit a command for read write console.
        """
        if serial_type in ['file', 'tls']:
            # read-only console: just expect the login prompt to show up
            _, output = console.read_until_output_matches(['.*[Ll]ogin:'])
        else:
            console.wait_for_login(username, password)
            status, output = console.cmd_status_output('pwd')
            logging.debug("Command status: %s", status)
            logging.debug("Command output: %s", output)

    def cleanup(objs_list):
        """
        Clean up test environment
        """
        if serial_type == 'file':
            if os.path.exists('/var/log/libvirt/virt-test'):
                os.remove('/var/log/libvirt/virt-test')

        # recovery test environment
        for obj in objs_list:
            obj.auto_recover = True
            del obj

    def get_console_type():
        """
        Check whether console should be started before or after guest
        starting.

        :return: 'server' when the host side must listen before the guest
                 connects, 'client' otherwise (None for unsupported types)
        """
        if serial_type in ['tcp', 'unix', 'udp', 'tls']:
            for source in serial_dev.sources:
                if 'mode' in source and source['mode'] == 'connect':
                    return 'server'
            return 'client'
        elif serial_type in ['file']:
            return 'client'
        elif serial_type in ['pipe']:
            return 'server'

    serial_type = params.get('serial_type', 'pty')
    sources_str = params.get('serial_sources', '')
    username = params.get('username')
    password = params.get('password')
    console_target_type = params.get('console_target_type', 'serial')
    target_type = params.get('target_type', 'isa-serial')
    console_target_port = params.get('console_target_port', '0')
    second_serial_console = params.get('second_serial_console', 'no') == 'yes'
    custom_pki_path = params.get('custom_pki_path', '/etc/pki/libvirt-chardev')
    auto_recover = params.get('auto_recover', 'no')
    client_pwd = params.get('client_pwd', None)
    server_pwd = params.get('server_pwd', None)

    args_list = [client_pwd, server_pwd]

    for arg in args_list:
        if arg and arg.count("ENTER.YOUR."):
            # test.fail raises TestFail itself; the original
            # `raise test.fail(...)` re-raised a value that never exists
            test.fail("Please assign a value for %s!" % arg)

    vm_name = params.get('main_vm', 'virt-tests-vm1')
    vm = env.get_vm(vm_name)

    # it's used to clean up TLS objs later
    objs_list = []

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    try:
        if console_target_type != 'virtio':
            vm_xml.remove_all_device_by_type('serial')
        vm_xml.remove_all_device_by_type('console')
        if serial_type == "tls":
            test_dict = dict(params)
            tls_obj = TLSConnection(test_dict)
            if auto_recover == "yes":
                objs_list.append(tls_obj)
            tls_obj.conn_setup(False, True, True)

        serial_dev = prepare_serial_device()
        if console_target_type == 'serial' or second_serial_console:
            logging.debug('Serial device:\n%s', serial_dev)
            vm_xml.add_device(serial_dev)
            if serial_type == "spiceport":
                spice_graphics_dev = prepare_spice_graphics_device()
                logging.debug('Spice graphics device:\n%s',
                              spice_graphics_dev)
                vm_xml.add_device(spice_graphics_dev)

        console_dev = prepare_console_device()
        logging.debug('Console device:\n%s', console_dev)
        vm_xml.add_device(console_dev)
        if second_serial_console:
            # Defining a second serial console must be rejected by libvirt
            console_target_type = 'serial'
            console_target_port = '1'
            console_dev = prepare_console_device()
            logging.debug('Console device:\n%s', console_dev)
            vm_xml.add_device(console_dev)
            vm_xml.undefine()
            res = virsh.define(vm_xml.xml)
            # BUGFIX: the original `res.stderr.find(...)` was truthy (-1)
            # when the message was ABSENT, so the negative test passed
            # exactly when it should have failed (and bytes.find(str)
            # raises TypeError on Python 3). Use a substring test on the
            # decoded stderr instead.
            if 'Only the first console can be a serial port' in res.stderr_text:
                logging.debug("Test only the first console can be a serial"
                              "succeeded")
                return
            test.fail("Test only the first console can be serial failed.")

        console_type = get_console_type()

        if not define_and_check():
            logging.debug("Can't define the VM, exiting.")
            return
        if console_target_type != 'virtio':
            check_xml()

        expected_fails = []
        if serial_type == 'nmdm' and platform.system() != 'FreeBSD':
            expected_fails.append("unsupported chardev 'nmdm'")

        # Prepare console before start when console is server
        if console_type == 'server':
            console = prepare_serial_console()

        res = virsh.start(vm_name)
        libvirt.check_result(res, expected_fails, [])
        if res.exit_status:
            logging.debug("Can't start the VM, exiting.")
            return
        check_qemu_cmdline()

        # Prepare console after start when console is client
        if console_type == 'client':
            console = prepare_serial_console()

        if (console_type is not None and serial_type != 'tls' and
                console_type != 'server'):
            check_serial_console(console, username, password)

        vm.destroy()

    finally:
        cleanup(objs_list)
        vm_xml_backup.sync()
def run(test, params, env): def check_vm_network_accessed(): """ The operations to the VM need to be done before migration happens """ # 1. Confirm local VM can be accessed through network. logging.info("Check local VM network connectivity before migrating") s_ping, o_ping = utils_test.ping(vm.get_address(), count=10, timeout=20) logging.info(o_ping) if s_ping != 0: test.error("%s did not respond after %d sec." % (vm.name, 20)) def check_virsh_command_and_option(command, option=None): """ Check if virsh command exists :param command: the command to be checked :param option: the command option to be checked """ msg = "This version of libvirt does not support " if not virsh.has_help_command(command): test.cancel(msg + "virsh command '%s'" % command) if option and not virsh.has_command_help_match(command, option): test.cancel(msg + "virsh command '%s' with option '%s'" % (command, option)) def do_migration(vm, dest_uri, options, extra): """ Execute the migration with given parameters :param vm: the guest to be migrated :param dest_uri: the destination uri for migration :param options: options next to 'migrate' command :param extra: options in the end of the migrate command line :return: CmdResult object """ logging.info("Sleeping 10 seconds before migration") time.sleep(10) # Migrate the guest. 
migration_res = vm.migrate(dest_uri, options, extra, **virsh_args) logging.info("Migration out: %s", results_stdout_52lts(migration_res).strip()) logging.info("Migration error: %s", results_stderr_52lts(migration_res).strip()) if int(migration_res.exit_status) != 0: logging.error("Migration failed for %s.", vm_name) return migration_res if vm.is_alive(): # vm.connect_uri was updated logging.info("VM is alive on destination %s.", dest_uri) else: test.fail("VM is not alive on destination %s" % dest_uri) # Throws exception if console shows panic message vm.verify_kernel_crash() return migration_res def cleanup_libvirtd_log(log_file): """ Remove existing libvirtd log file on source and target host. :param log_file: log file with absolute path """ if os.path.exists(log_file): logging.debug("Delete local libvirt log file '%s'", log_file) os.remove(log_file) cmd = "rm -f %s" % log_file logging.debug("Delete remote libvirt log file '%s'", log_file) run_remote_cmd(cmd) def cleanup_dest(vm): """ Clean up the destination host environment when doing the uni-direction migration. :param vm: the guest to be cleaned up """ logging.info("Cleaning up VMs on %s", vm.connect_uri) try: if virsh.domain_exists(vm.name, uri=vm.connect_uri): vm_state = vm.state() if vm_state == "paused": vm.resume() elif vm_state == "shut off": vm.start() vm.destroy(gracefully=False) if vm.is_persistent(): vm.undefine() except Exception as detail: logging.error("Cleaning up destination failed.\n%s", detail) def run_remote_cmd(cmd): """ A function to run a command on remote host. 
:param cmd: the command to be executed :return: CmdResult object """ remote_runner = remote.RemoteRunner(host=server_ip, username=server_user, password=server_pwd) cmdResult = remote_runner.run(cmd, ignore_status=True) if cmdResult.exit_status: test.fail("Failed to run '%s' on remote: %s" % (cmd, results_stderr_52lts(cmdResult).strip())) return cmdResult def run_stress_in_vm(): """ The function to load stress in VM """ stress_args = params.get( "stress_args", "--cpu 8 --io 4 " "--vm 2 --vm-bytes 128M " "--timeout 20s") try: vm_session.cmd('stress %s' % stress_args) except Exception as detail: logging.debug(detail) def check_timeout_postcopy(params): """ Check the vm state on target host after timeout when --postcopy and --timeout-postcopy are used. The vm state is expected as running. :param params: the parameters used """ timeout = int(params.get("timeout_postcopy", 10)) time.sleep(timeout + 1) remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs) vm_state = results_stdout_52lts( remote_virsh_session.domstate(vm_name)).strip() if vm_state != "running": remote_virsh_session.close_session() test.fail( "After timeout '%s' seconds, " "the vm state on target host should " "be 'running', but '%s' found", timeout, vm_state) remote_virsh_session.close_session() def get_usable_compress_cache(pagesize): """ Get a number which is bigger than pagesize and is power of two. 
:param pagesize: the given integer :return: an integer satisfying the criteria """ def calculate(num): result = num & (num - 1) return (result == 0) item = pagesize found = False while (not found): item += 1 found = calculate(item) logging.debug( "%d is smallest one that is bigger than '%s' and " "is power of 2", item, pagesize) return item check_parameters(test, params) # Params for NFS shared storage shared_storage = params.get("migrate_shared_storage", "") if shared_storage == "": default_guest_asset = defaults.get_default_guest_os_info()['asset'] default_guest_asset = "%s.qcow2" % default_guest_asset shared_storage = os.path.join(params.get("nfs_mount_dir"), default_guest_asset) logging.debug("shared_storage:%s", shared_storage) # params for migration connection params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( params.get("migrate_dest_host")) # Params to update disk using shared storage params["disk_type"] = "file" params["disk_source_protocol"] = "netfs" params["mnt_path_name"] = params.get("nfs_mount_dir") # Local variables virsh_args = {"ignore_status": True, "debug": True} server_ip = params.get("server_ip") server_user = params.get("server_user", "root") server_pwd = params.get("server_pwd") extra = params.get("virsh_migrate_extra") options = params.get("virsh_migrate_options") src_uri = params.get("virsh_migrate_connect_uri") dest_uri = params.get("virsh_migrate_desturi") log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log") check_complete_job = "yes" == params.get("check_complete_job", "no") config_libvirtd = "yes" == params.get("config_libvirtd", "no") grep_str_remote_log = params.get("grep_str_remote_log", "") grep_str_local_log = params.get("grep_str_local_log", "") stress_in_vm = "yes" == params.get("stress_in_vm", "no") remote_virsh_dargs = { 'remote_ip': server_ip, 'remote_user': server_user, 'remote_pwd': server_pwd, 'unprivileged_user': None, 'ssh_remote_auth': True } # For TLS tls_recovery = 
params.get("tls_auto_recovery", "yes") # qemu config qemu_conf_dict = None # libvirtd config libvirtd_conf_dict = None remote_virsh_session = None vm = None vm_session = None libvirtd_conf = None qemu_conf = None mig_result = None test_exception = None is_TestError = False is_TestFail = False is_TestSkip = False asynch_migration = False # Objects to be cleaned up in the end objs_list = [] tls_obj = None # Local variables vm_name = params.get("migrate_main_vm") vm = env.get_vm(vm_name) vm.verify_alive() # For safety reasons, we'd better back up xmlfile. orig_config_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) if not orig_config_xml: test.error("Backing up xmlfile failed.") try: # Change the disk of the vm to shared disk libvirt.set_vm_disk(vm, params) if extra.count("--tls"): qemu_conf_dict = {"migrate_tls_x509_verify": "1"} # Setup TLS tls_obj = TLSConnection(params) if tls_recovery == "yes": objs_list.append(tls_obj) tls_obj.auto_recover = True tls_obj.conn_setup() # Setup qemu configure logging.debug("Configure the qemu") cleanup_libvirtd_log(log_file) qemu_conf = libvirt.customize_libvirt_config(qemu_conf_dict, config_type="qemu", remote_host=True, extra_params=params) # Setup libvirtd if config_libvirtd: logging.debug("Configure the libvirtd") cleanup_libvirtd_log(log_file) libvirtd_conf_dict = setup_libvirtd_conf_dict(params) libvirtd_conf = libvirt.customize_libvirt_config( libvirtd_conf_dict, remote_host=True, extra_params=params) if not vm.is_alive(): vm.start() vm_session = vm.wait_for_login() check_vm_network_accessed() if stress_in_vm: pkg_name = 'stress' logging.debug("Check if stress tool is installed") pkg_mgr = utils_package.package_manager(vm_session, pkg_name) if not pkg_mgr.is_installed(pkg_name): logging.debug("Stress tool will be installed") if not pkg_mgr.install(): test.error("Package '%s' installation fails" % pkg_name) stress_thread = threading.Thread(target=run_stress_in_vm, args=()) stress_thread.start() if 
extra.count("timeout-postcopy"): asynch_migration = True func_name = check_timeout_postcopy if extra.count("comp-xbzrle-cache"): cache = get_usable_compress_cache(memory.get_page_size()) extra = "%s %s" % (extra, cache) # For --postcopy enable postcopy_options = params.get("postcopy_options") if postcopy_options and not extra.count(postcopy_options): extra = "%s %s" % (extra, postcopy_options) if not asynch_migration: mig_result = do_migration(vm, dest_uri, options, extra) else: migration_test = libvirt.MigrationTest() logging.debug("vm.connect_uri=%s", vm.connect_uri) vms = [vm] try: migration_test.do_migration(vms, None, dest_uri, 'orderly', options, thread_timeout=900, ignore_status=True, func=func_name, extra_opts=extra, func_params=params) mig_result = migration_test.ret except exceptions.TestFail as fail_detail: test.fail(fail_detail) except exceptions.TestSkipError as skip_detail: test.cancel(skip_detail) except exceptions.TestError as error_detail: test.error(error_detail) except Exception as details: mig_result = migration_test.ret logging.error(details) if int(mig_result.exit_status) != 0: test.fail(results_stderr_52lts(mig_result).strip()) if check_complete_job: search_str_domjobinfo = params.get("search_str_domjobinfo", None) opts = "--completed" args = vm_name + " " + opts check_virsh_command_and_option("domjobinfo", opts) jobinfo = results_stdout_52lts( virsh.domjobinfo(args, debug=True, ignore_status=True)).strip() logging.debug("Local job info on completion:\n%s", jobinfo) if extra.count("comp-xbzrle-cache") and search_str_domjobinfo: search_str_domjobinfo = "%s %s" % (search_str_domjobinfo, cache // 1024) if search_str_domjobinfo: if not re.search(search_str_domjobinfo, jobinfo): test.fail("Fail to search '%s' on local:\n%s" % (search_str_domjobinfo, jobinfo)) # Check remote host remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs) jobinfo = results_stdout_52lts( remote_virsh_session.domjobinfo(args, debug=True, 
ignore_status=True)).strip() logging.debug("Remote job info on completion:\n%s", jobinfo) if search_str_domjobinfo: if not re.search(search_str_domjobinfo, jobinfo): remote_virsh_session.close_session() test.fail("Fail to search '%s' on remote:\n%s" % (search_str_domjobinfo, jobinfo)) remote_virsh_session.close_session() if grep_str_local_log: cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file) cmdRes = process.run(cmd, shell=True, ignore_status=True) if cmdRes.exit_status: test.fail(results_stderr_52lts(cmdRes).strip()) if grep_str_remote_log: cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file) run_remote_cmd(cmd) except exceptions.TestFail as details: is_TestFail = True test_exception = details except exceptions.TestSkipError as details: is_TestSkip = True test_exception = details except exceptions.TestError as details: is_TestError = True test_exception = details except Exception as details: test_exception = details finally: logging.debug("Recover test environment") try: # Clean VM on destination vm.connect_uri = dest_uri cleanup_dest(vm) vm.connect_uri = src_uri logging.info("Recovery VM XML configration") orig_config_xml.sync() logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile) if remote_virsh_session: remote_virsh_session.close_session() if extra.count("--tls"): logging.debug("Recover the qemu configuration") libvirt.customize_libvirt_config(None, config_type="qemu", remote_host=True, extra_params=params, is_recover=True, config_object=qemu_conf) if config_libvirtd: logging.debug("Recover the libvirtd configuration") libvirt.customize_libvirt_config(None, remote_host=True, extra_params=params, is_recover=True, config_object=libvirtd_conf) logging.info("Remove local NFS image") source_file = params.get("source_file") libvirt.delete_local_disk("file", path=source_file) if objs_list: for obj in objs_list: logging.debug("Clean up local objs") del obj except Exception as exception_detail: if (not test_exception and not is_TestError 
and not is_TestFail and not is_TestSkip): raise exception_detail else: # if any of above exceptions has been raised, only print # error log here to avoid of hiding the original issue logging.error(exception_detail) # Check result if is_TestFail: test.fail(test_exception) if is_TestSkip: test.cancel(test_exception) if is_TestError: test.error(test_exception) if not test_exception: logging.info("Case execution is done.") else: test.error(test_exception)
def run(test, params, env): """ Test virsh migrate command. """ def set_feature(vmxml, feature, value): """ Set guest features for PPC :param state: the htm status :param vmxml: guest xml """ features_xml = vm_xml.VMFeaturesXML() if feature == 'hpt': hpt_xml = vm_xml.VMFeaturesHptXML() hpt_xml.resizing = value features_xml.hpt = hpt_xml elif feature == 'htm': features_xml.htm = value vmxml.features = features_xml vmxml.sync() def trigger_hpt_resize(session): """ Check the HPT order file and dmesg :param session: the session to guest :raise: test.fail if required message is not found """ hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order" hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip() hpt_order = int(hpt_order) logging.info('Current hpt_order is %d', hpt_order) hpt_order += 1 cmd = 'echo %d > %s' % (hpt_order, hpt_order_path) cmd_result = session.cmd_status_output(cmd) result = process.CmdResult(stderr=cmd_result[1], stdout=cmd_result[1], exit_status=cmd_result[0]) libvirt.check_exit_status(result) dmesg = session.cmd('dmesg') dmesg_content = params.get('dmesg_content').split('|') for content in dmesg_content: if content % hpt_order not in dmesg: test.fail("'%s' is missing in dmesg" % (content % hpt_order)) else: logging.info("'%s' is found in dmesg", content % hpt_order) def check_vm_network_accessed(session=None): """ The operations to the VM need to be done before or after migration happens :param session: The session object to the host :raise: test.error when ping fails """ # Confirm local/remote VM can be accessed through network. logging.info("Check VM network connectivity") s_ping, _ = utils_test.ping(vm.get_address(), count=10, timeout=20, output_func=logging.debug, session=session) if s_ping != 0: if session: session.close() test.fail("%s did not respond after %d sec." 
def do_migration(vm, dest_uri, options, extra):
    """
    Run the migration command and sanity-check the outcome.

    :param vm: the guest to be migrated
    :param dest_uri: the destination uri for migration
    :param options: options next to 'migrate' command
    :param extra: options in the end of the migrate command line
    :return: CmdResult object
    """
    logging.info("Sleeping 10 seconds before migration")
    time.sleep(10)
    # Migrate the guest; never raise on a non-zero exit status so the
    # caller can inspect the CmdResult itself.
    virsh_args.update({"ignore_status": True})
    result = vm.migrate(dest_uri, options, extra, **virsh_args)
    if int(result.exit_status):
        logging.error("Migration failed for %s.", vm_name)
        return result
    # vm.connect_uri was updated
    if not vm.is_alive():
        test.fail("VM is not alive on destination %s" % dest_uri)
    logging.info("VM is alive on destination %s.", dest_uri)
    # Throws exception if console shows panic message
    vm.verify_kernel_crash()
    return result
def check_domjobinfo(params, option=""):
    """
    Check given item in domjobinfo of the guest is as expected

    :param params: the parameters used
    :param option: options for domjobinfo
    :raise: test.fail if the value of given item is unexpected
    """
    def search_jobinfo(jobinfo):
        """
        Find value of given item in domjobinfo

        :param jobinfo: cmdResult object
        :raise: test.fail if not found
        """
        for item in jobinfo.stdout.splitlines():
            if item.count(jobinfo_item):
                groups = re.findall(r'[0-9.]+', item.strip())
                logging.debug("In '%s' search '%s'\n", item, groups[0])
                # Use true division: the relative difference must be a
                # float ratio.  Floor division ('//') truncates the ratio
                # to 0 whenever the absolute difference is smaller than
                # the reference value, which silently passed the check.
                if (math.fabs(float(groups[0]) - float(compare_to_value)) /
                        float(compare_to_value) > diff_rate):
                    test.fail("{} {} has too much difference from "
                              "{}".format(jobinfo_item,
                                          groups[0],
                                          compare_to_value))
                break

    jobinfo_item = params.get("jobinfo_item")
    compare_to_value = params.get("compare_to_value")
    logging.debug("compare_to_value:%s", compare_to_value)
    diff_rate = float(params.get("diff_rate", "0"))
    # Nothing to verify unless both the item name and a reference value
    # were provided by the caller.
    if not jobinfo_item or not compare_to_value:
        return
    vm_ref = '{}{}'.format(vm_name, option)
    jobinfo = virsh.domjobinfo(vm_ref, **virsh_args)
    search_jobinfo(jobinfo)
    check_domjobinfo_remote = params.get("check_domjobinfo_remote")
    if check_domjobinfo_remote:
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        jobinfo = remote_virsh_session.domjobinfo(vm_ref, **virsh_args)
        search_jobinfo(jobinfo)
        remote_virsh_session.close_session()
expected_value["all_items"][0] == item_key: del expected_value["all_items"][0] else: test.fail("The item '%s' should be '%s'" % (item_key, expected_value["all_items"][0])) if item_key in expected_value: item_value = ':'.join(item.strip().split(':')[1:]).strip() if item_value != expected_value.get(item_key): test.fail("The value of {} is {} which is not " "expected".format(item_key, item_value)) else: del expected_value[item_key] if postcopy_req and item_key == "Postcopy requests": if int(item.strip().split(':')[1]) <= 0: test.fail("The value of Postcopy requests is incorrect") # Check if all the items in expect_dict checked or not if "all_items" in expected_value: if len(expected_value["all_items"]) > 0: test.fail("Missing item: {} from all_items" .format(expected_value["all_items"])) else: del expected_value["all_items"] if len(expected_value) != 0: test.fail("Missing item: {}".format(expected_value)) def set_migratepostcopy(): """ Switch to postcopy during migration """ res = virsh.migrate_postcopy(vm_name) logging.debug("Command output: %s", res) if not utils_misc.wait_for( lambda: migration_test.check_vm_state(vm_name, "paused", "post-copy"), 10): test.fail("vm status is expected to 'paused (post-copy)'") def check_domjobinfo_output(option="", is_mig_compelete=False): """ Check all items in domjobinfo of the guest on both remote and local :param option: options for domjobinfo :param is_mig_compelete: False for domjobinfo checking during migration, True for domjobinfo checking after migration :raise: test.fail if the value of given item is unexpected """ expected_list_during_mig = ["Job type", "Operation", "Time elapsed", "Data processed", "Data remaining", "Data total", "Memory processed", "Memory remaining", "Memory total", "Memory bandwidth", "Dirty rate", "Page size", "Iteration", "Constant pages", "Normal pages", "Normal data", "Expected downtime", "Setup time"] if libvirt_version.version_compare(4, 10, 0): expected_list_during_mig.insert(13, "Postcopy 
requests") expected_list_after_mig_src = copy.deepcopy(expected_list_during_mig) expected_list_after_mig_src[-2] = 'Total downtime' expected_list_after_mig_dest = copy.deepcopy(expected_list_after_mig_src) # Check version in remote if not expected_list_after_mig_dest.count("Postcopy requests"): remote_session = remote.remote_login("ssh", server_ip, "22", server_user, server_pwd, "#") if libvirt_version.version_compare(4, 10, 0, session=remote_session): expected_list_after_mig_dest.insert(14, "Postcopy requests") remote_session.close() expect_dict = {"src_notdone": {"Job type": "Unbounded", "Operation": "Outgoing migration", "all_items": expected_list_during_mig}, "dest_notdone": {"error": "Operation not supported: mig" "ration statistics are availab" "le only on the source host"}, "src_done": {"Job type": "Completed", "Operation": "Outgoing migration", "all_items": expected_list_after_mig_src}, "dest_done": {"Job type": "Completed", "Operation": "Incoming migration", "all_items": expected_list_after_mig_dest}} pc_opt = False if postcopy_options: pc_opt = True if is_mig_compelete: expect_dict["dest_done"].clear() expect_dict["dest_done"]["Job type"] = "None" else: set_migratepostcopy() vm_ref = '{}{}'.format(vm_name, option) src_jobinfo = virsh.domjobinfo(vm_ref, **virsh_args) cmd = "virsh domjobinfo {} {}".format(vm_name, option) dest_jobinfo = remote.run_remote_cmd(cmd, cmd_parms, runner_on_target) if not is_mig_compelete: search_jobinfo_output(src_jobinfo.stdout, expect_dict["src_notdone"], postcopy_req=pc_opt) search_jobinfo_output(dest_jobinfo.stderr, expect_dict["dest_notdone"]) else: search_jobinfo_output(src_jobinfo.stdout, expect_dict["src_done"]) search_jobinfo_output(dest_jobinfo.stdout, expect_dict["dest_done"]) def check_maxdowntime(params): """ Set/get migration maxdowntime :param params: the parameters used :raise: test.fail if maxdowntime set does not take effect """ expected_value = int(float(params.get("migrate_maxdowntime", '0.3')) * 1000) 
def run_time(init_time=2):
    """
    Decorator factory failing the test when the wrapped call is slow.

    :param init_time: Expected run time
    :raise: test.fail if func takes more than init_time(second)
    """
    def check_time(func):
        def wrapper(*args, **kwargs):
            begin = time.time()
            outcome = func(*args, **kwargs)
            elapsed = time.time() - begin
            # Only the wall-clock duration matters; the wrapped result
            # is always handed back unchanged.
            if elapsed > init_time:
                test.fail("It takes too long to check {}. The duration is "
                          "{}s which should be less than {}s"
                          .format(func.__doc__, elapsed, init_time))
            return outcome
        return wrapper
    return check_time
def do_actions_during_migrate(params):
    """
    The entry point to execute action list during migration

    :param params: the parameters used
    """
    # Map each supported action keyword to the callable performing it.
    dispatch = {
        'setspeed': lambda: check_setspeed(params),
        'domjobinfo': lambda: check_domjobinfo(params),
        'setmaxdowntime': lambda: check_maxdowntime(params),
        'converge': lambda: check_converge(params),
        'domjobinfo_output_all': lambda: check_domjobinfo_output(),
        'checkdomstats': lambda: run_domstats(vm_name),
        'killqemutarget': lambda: kill_qemu_target(),
    }
    actions = params.get("actions_during_migration")
    if not actions:
        return
    for action in actions.split(","):
        handler = dispatch.get(action)
        if handler:
            handler()
        # Pause between actions (also after unrecognized names), keeping
        # the pacing of the original sequential checks.
        time.sleep(3)
The vm state is expected as running. :param params: the parameters used """ timeout = int(params.get("timeout_postcopy", 10)) time.sleep(timeout + 1) remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs) vm_state = results_stdout_52lts(remote_virsh_session.domstate(vm_name)).strip() if vm_state != "running": remote_virsh_session.close_session() test.fail("After timeout '%s' seconds, " "the vm state on target host should " "be 'running', but '%s' found" % (timeout, vm_state)) remote_virsh_session.close_session() def check_converge(params): """ Handle option '--auto-converge --auto-converge-initial --auto-converge-increment '. 'Auto converge throttle' in domjobinfo should start with the initial value and increase with correct increment and max value is 99. :param params: The parameters used :raise: exceptions.TestFail when unexpected or no throttle is found """ initial = int(params.get("initial", 20)) increment = int(params.get("increment", 10)) max_converge = int(params.get("max_converge", 99)) allow_throttle_list = [initial + count * increment for count in range(0, (100 - initial) // increment + 1) if (initial + count * increment) < 100] allow_throttle_list.append(max_converge) logging.debug("The allowed 'Auto converge throttle' value " "is %s", allow_throttle_list) throttle = 0 jobtype = "None" while throttle < 100: cmd_result = virsh.domjobinfo(vm_name, debug=True, ignore_status=True) if cmd_result.exit_status: logging.debug(cmd_result.stderr) # Check if migration is completed if "domain is not running" in cmd_result.stderr: args = vm_name + " --completed" cmd_result = virsh.domjobinfo(args, debug=True, ignore_status=True) if cmd_result.exit_status: test.error("Failed to get domjobinfo and domjobinfo " "--completed: %s" % cmd_result.stderr) else: test.error("Failed to get domjobinfo: %s" % cmd_result.stderr) jobinfo = cmd_result.stdout for line in jobinfo.splitlines(): key = line.split(':')[0] if key.count("Job type"): jobtype = 
def get_usable_compress_cache(pagesize):
    """
    Get a number which is bigger than pagesize and is power of two.

    :param pagesize: the given integer; assumed non-negative, as it is
                     a memory page size — TODO confirm with callers
    :return: the smallest power of two strictly greater than pagesize
    """
    # int.bit_length() is the index of the highest set bit plus one, so
    # shifting 1 by it yields the smallest power of two > pagesize in
    # O(1), instead of the previous increment-and-test loop which took
    # up to `pagesize` iterations.
    item = 1 << pagesize.bit_length()
    logging.debug("%d is smallest one that is bigger than '%s' and "
                  "is power of 2", item, pagesize)
    return item
results_stderr_52lts(result).strip()) if status_error: # Migration should fail if err_msg: # Special error messages are expected if not re.search(err_msg, results_stderr_52lts(result).strip()): test.fail("Can not find the expected patterns '%s' in " "output '%s'" % (err_msg, results_stderr_52lts(result).strip())) else: logging.debug("It is the expected error message") else: if int(result.exit_status) != 0: logging.debug("Migration failure is expected result") else: test.fail("Migration success is unexpected result") else: if int(result.exit_status) != 0: test.fail(results_stderr_52lts(result).strip()) check_parameters(test, params) # Params for NFS shared storage shared_storage = params.get("migrate_shared_storage", "") if shared_storage == "": default_guest_asset = defaults.get_default_guest_os_info()['asset'] default_guest_asset = "%s.qcow2" % default_guest_asset shared_storage = os.path.join(params.get("nfs_mount_dir"), default_guest_asset) logging.debug("shared_storage:%s", shared_storage) # params for migration connection params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( params.get("migrate_dest_host")) # Params to update disk using shared storage params["disk_type"] = "file" params["disk_source_protocol"] = "netfs" params["mnt_path_name"] = params.get("nfs_mount_dir") # Local variables virsh_args = {"debug": True} virsh_opt = params.get("virsh_opt", "") server_ip = params.get("server_ip") server_user = params.get("server_user", "root") server_pwd = params.get("server_pwd") extra = params.get("virsh_migrate_extra") options = params.get("virsh_migrate_options") src_uri = params.get("virsh_migrate_connect_uri") dest_uri = params.get("virsh_migrate_desturi") log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log") check_complete_job = "yes" == params.get("check_complete_job", "no") check_domjobinfo_results = "yes" == params.get("check_domjobinfo_results") contrl_index = params.get("new_contrl_index", None) asynch_migration = "yes" == 
params.get("asynch_migrate", "no") grep_str_remote_log = params.get("grep_str_remote_log", "") grep_str_local_log = params.get("grep_str_local_log", "") disable_verify_peer = "yes" == params.get("disable_verify_peer", "no") status_error = "yes" == params.get("status_error", "no") stress_in_vm = "yes" == params.get("stress_in_vm", "no") low_speed = params.get("low_speed", None) remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user, 'remote_pwd': server_pwd, 'unprivileged_user': None, 'ssh_remote_auth': True} cmd_parms = {'server_ip': server_ip, 'server_user': server_user, 'server_pwd': server_pwd} hpt_resize = params.get("hpt_resize", None) htm_state = params.get("htm_state", None) # For pty channel test add_channel = "yes" == params.get("add_channel", "no") channel_type_name = params.get("channel_type_name", None) target_type = params.get("target_type", None) target_name = params.get("target_name", None) cmd_run_in_remote_guest = params.get("cmd_run_in_remote_guest", None) cmd_run_in_remote_guest_1 = params.get("cmd_run_in_remote_guest_1", None) cmd_run_in_remote_host = params.get("cmd_run_in_remote_host", None) cmd_run_in_remote_host_1 = params.get("cmd_run_in_remote_host_1", None) cmd_run_in_remote_host_2 = params.get("cmd_run_in_remote_host_2", None) # For qemu command line checking qemu_check = params.get("qemu_check", None) xml_check_after_mig = params.get("guest_xml_check_after_mig", None) # params for cache matrix test cache = params.get("cache") remove_cache = "yes" == params.get("remove_cache", "no") err_msg = params.get("err_msg") arch = platform.machine() if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch: test.cancel("The case is PPC only.") # For TLS tls_recovery = params.get("tls_auto_recovery", "yes") # qemu config qemu_conf_dict = None # libvirtd config libvirtd_conf_dict = None # remote shell session remote_session = None remote_virsh_session = None vm = None vm_session = None libvirtd_conf = None qemu_conf = 
None mig_result = None test_exception = None is_TestError = False is_TestFail = False is_TestSkip = False # Objects to be cleaned up in the end objs_list = [] tls_obj = None # Local variables vm_name = params.get("migrate_main_vm") vm = env.get_vm(vm_name) vm.verify_alive() # For safety reasons, we'd better back up xmlfile. new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) orig_config_xml = new_xml.copy() if not orig_config_xml: test.error("Backing up xmlfile failed.") try: # Create a remote runner for later use runner_on_target = remote.RemoteRunner(host=server_ip, username=server_user, password=server_pwd) # Change the configuration files if needed before starting guest # For qemu.conf if extra.count("--tls"): # Setup TLS tls_obj = TLSConnection(params) if tls_recovery == "yes": objs_list.append(tls_obj) tls_obj.auto_recover = True tls_obj.conn_setup() # Setup qemu.conf qemu_conf_dict = params.get("qemu_conf_dict") if qemu_conf_dict: qemu_conf = update_config_file('qemu', qemu_conf_dict, params=params) # Setup libvirtd libvirtd_conf_dict = params.get("libvirtd_conf_dict") if libvirtd_conf_dict: libvirtd_conf = update_config_file('libvirtd', libvirtd_conf_dict, params=params) # Prepare required guest xml before starting guest if contrl_index: new_xml.remove_all_device_by_type('controller') logging.debug("After removing controllers, current XML:\n%s\n", new_xml) add_ctrls(new_xml, dev_index=contrl_index) if add_channel: attach_channel_xml() if hpt_resize: set_feature(new_xml, 'hpt', hpt_resize) if htm_state: set_feature(new_xml, 'htm', htm_state) if cache: params["driver_cache"] = cache if remove_cache: params["enable_cache"] = "no" # Change the disk of the vm to shared disk and then start VM libvirt.set_vm_disk(vm, params) if not vm.is_alive(): vm.start() logging.debug("Guest xml after starting:\n%s", vm_xml.VMXML.new_from_dumpxml(vm_name)) # Check qemu command line after guest is started if qemu_check: check_content = qemu_check if hpt_resize: 
check_content = "%s%s" % (qemu_check, hpt_resize) if htm_state: check_content = "%s%s" % (qemu_check, htm_state) libvirt.check_qemu_cmd_line(check_content) # Check local guest network connection before migration vm_session = vm.wait_for_login() check_vm_network_accessed() # Preparation for the running guest before migration if hpt_resize and hpt_resize != 'disabled': trigger_hpt_resize(vm_session) if stress_in_vm: pkg_name = 'stress' logging.debug("Check if stress tool is installed") pkg_mgr = utils_package.package_manager(vm_session, pkg_name) if not pkg_mgr.is_installed(pkg_name): logging.debug("Stress tool will be installed") if not pkg_mgr.install(): test.error("Package '%s' installation fails" % pkg_name) stress_thread = threading.Thread(target=run_stress_in_vm, args=()) stress_thread.start() if extra.count("timeout-postcopy"): func_name = check_timeout_postcopy if params.get("actions_during_migration"): func_name = do_actions_during_migrate if extra.count("comp-xbzrle-cache"): cache = get_usable_compress_cache(memory.get_page_size()) extra = "%s %s" % (extra, cache) # For --postcopy enable postcopy_options = params.get("postcopy_options") if postcopy_options: extra = "%s %s" % (extra, postcopy_options) if low_speed: control_migrate_speed(int(low_speed)) if postcopy_options and libvirt_version.version_compare(5, 0, 0): control_migrate_speed(int(low_speed), opts=postcopy_options) # Execute migration process if not asynch_migration: mig_result = do_migration(vm, dest_uri, options, extra) else: migration_test = libvirt.MigrationTest() logging.debug("vm.connect_uri=%s", vm.connect_uri) vms = [vm] try: migration_test.do_migration(vms, None, dest_uri, 'orderly', options, thread_timeout=900, ignore_status=True, virsh_opt=virsh_opt, func=func_name, extra_opts=extra, func_params=params) mig_result = migration_test.ret except exceptions.TestFail as fail_detail: test.fail(fail_detail) except exceptions.TestSkipError as skip_detail: test.cancel(skip_detail) except 
exceptions.TestError as error_detail: test.error(error_detail) except Exception as details: mig_result = migration_test.ret logging.error(details) check_migration_res(mig_result) if add_channel: # Get the channel device source path of remote guest if not remote_virsh_session: remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs) file_path = tempfile.mktemp(dir=data_dir.get_tmp_dir()) remote_virsh_session.dumpxml(vm_name, to_file=file_path, debug=True, ignore_status=True) local_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) local_vmxml.xmltreefile = xml_utils.XMLTreeFile(file_path) for elem in local_vmxml.devices.by_device_tag('channel'): logging.debug("Found channel device {}".format(elem)) if elem.type_name == channel_type_name: host_source = elem.source.get('path') logging.debug("Remote guest uses {} for channel device".format(host_source)) break remote_virsh_session.close_session() if not host_source: test.fail("Can not find source for %s channel on remote host" % channel_type_name) # Prepare to wait for message on remote host from the channel cmd_result = remote.run_remote_cmd(cmd_run_in_remote_host % host_source, cmd_parms, runner_on_target) # Send message from remote guest to the channel file remote_vm_obj = utils_test.RemoteVMManager(cmd_parms) vm_ip = vm.get_address() vm_pwd = params.get("password") remote_vm_obj.setup_ssh_auth(vm_ip, vm_pwd) cmd_result = remote_vm_obj.run_command(vm_ip, cmd_run_in_remote_guest_1) remote_vm_obj.run_command(vm_ip, cmd_run_in_remote_guest % results_stdout_52lts(cmd_result).strip()) logging.debug("Sending message is done") # Check message on remote host from the channel remote.run_remote_cmd(cmd_run_in_remote_host_1, cmd_parms, runner_on_target) logging.debug("Receiving message is done") remote.run_remote_cmd(cmd_run_in_remote_host_2, cmd_parms, runner_on_target) if check_complete_job: opts = " --completed" check_virsh_command_and_option("domjobinfo", opts) if extra.count("comp-xbzrle-cache"): 
params.update({'compare_to_value': cache // 1024}) check_domjobinfo(params, option=opts) if check_domjobinfo_results: check_domjobinfo_output(option=opts, is_mig_compelete=True) if grep_str_local_log: cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file) cmdRes = process.run(cmd, shell=True, ignore_status=True) if cmdRes.exit_status: test.fail(results_stderr_52lts(cmdRes).strip()) if grep_str_remote_log: cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file) remote.run_remote_cmd(cmd, cmd_parms, runner_on_target) if xml_check_after_mig: if not remote_virsh_session: remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs) target_guest_dumpxml = results_stdout_52lts( remote_virsh_session.dumpxml(vm_name, debug=True, ignore_status=True)).strip() if hpt_resize: check_str = hpt_resize elif htm_state: check_str = htm_state if hpt_resize or htm_state: xml_check_after_mig = "%s'%s'" % (xml_check_after_mig, check_str) if not re.search(xml_check_after_mig, target_guest_dumpxml): remote_virsh_session.close_session() test.fail("Fail to search '%s' in target guest XML:\n%s" % (xml_check_after_mig, target_guest_dumpxml)) if contrl_index: all_ctrls = re.findall(xml_check_after_mig, target_guest_dumpxml) if len(all_ctrls) != int(contrl_index) + 1: remote_virsh_session.close_session() test.fail("%s pci-root controllers are expected in guest XML, " "but found %s" % (int(contrl_index) + 1, len(all_ctrls))) remote_virsh_session.close_session() if int(mig_result.exit_status) == 0: server_session = remote.wait_for_login('ssh', server_ip, '22', server_user, server_pwd, r"[\#\$]\s*$") check_vm_network_accessed(server_session) server_session.close() except exceptions.TestFail as details: is_TestFail = True test_exception = details except exceptions.TestSkipError as details: is_TestSkip = True test_exception = details except exceptions.TestError as details: is_TestError = True test_exception = details except Exception as details: test_exception = details finally: 
logging.debug("Recover test environment") try: # Clean VM on destination vm.connect_uri = dest_uri cleanup_dest(vm) vm.connect_uri = src_uri logging.info("Recovery VM XML configration") orig_config_xml.sync() logging.debug("The current VM XML:\n%s", orig_config_xml.xmltreefile) if remote_virsh_session: remote_virsh_session.close_session() if remote_session: remote_session.close() # Delete files on target # Killing qemu process on target may lead a problem like # qemu process becomes a zombie process whose ppid is 1. # As a workaround, have to remove the files under # /var/run/libvirt/qemu to make libvirt work. if vm.is_qemu(): dest_pid_files = os.path.join("/var/run/libvirt/qemu", vm_name + '*') cmd = "rm -f %s" % dest_pid_files logging.debug("Delete remote pid files '%s'", dest_pid_files) remote.run_remote_cmd(cmd, cmd_parms, runner_on_target) if extra.count("--tls") and not disable_verify_peer: logging.debug("Recover the qemu configuration") libvirt.customize_libvirt_config(None, config_type="qemu", remote_host=True, extra_params=params, is_recover=True, config_object=qemu_conf) for update_conf in [libvirtd_conf, qemu_conf]: if update_conf: logging.debug("Recover the configurations") libvirt.customize_libvirt_config(None, remote_host=True, extra_params=params, is_recover=True, config_object=update_conf) logging.info("Remove local NFS image") source_file = params.get("source_file") libvirt.delete_local_disk("file", path=source_file) if objs_list: for obj in objs_list: logging.debug("Clean up local objs") del obj except Exception as exception_detail: if (not test_exception and not is_TestError and not is_TestFail and not is_TestSkip): raise exception_detail else: # if any of above exceptions has been raised, only print # error log here to avoid of hiding the original issue logging.error(exception_detail) # Check result if is_TestFail: test.fail(test_exception) if is_TestSkip: test.cancel(test_exception) if is_TestError: test.error(test_exception) if not 
test_exception: logging.info("Case execution is done.") else: test.error(test_exception)
def run(test, params, env):
    """
    Test virt-admin server-clients-set

    2) Change max_clients to a new value;
    3) get the current clients info;
    4) check whether the clients info is correct;
    5) try to connect other client onto the server;
    6) check whether the above connection status is correct.
    """

    def clients_info(server):
        """
        check the attributes by server-clients-set.

        1) get the output returned by server-clients-set;
        2) split the output to get a dictionary of those attributes;

        :param server: print the info of the clients connecting to this server
        :return: a dict obtained by transforming the result_info
        """
        result_info = virt_admin.srv_clients_info(server, ignore_status=True,
                                                  debug=True)
        # Each output line has the shape "key : value"; build a dict from it.
        out = result_info.stdout_text.strip().splitlines()
        out_split = [item.split(':') for item in out]
        out_dict = dict([[item[0].strip(), item[1].strip()]
                         for item in out_split])
        return out_dict

    def chk_connect_to_daemon(connect_able):
        """
        Try one more client connection to the daemon and verify that the
        success/failure of that attempt matches the expectation.

        :param connect_able: "yes" if the extra connection is expected to
                             succeed, anything else if it must fail
        """
        try:
            virsh_instance.append(virsh.VirshPersistent(**remote_virsh_dargs))
        except Exception as info:
            if connect_able == "yes":
                test.fail("Connection to daemon is not success, "
                          "error:\n %s" % info)
            else:
                logging.info("Connections to daemon should not success, "
                             "this is a correct test result!")
        else:
            if connect_able == "yes":
                logging.info("Connections to daemon is successful, "
                             "this is a correct test result!")
            else:
                test.fail("error: Connection to daemon should not success! "
                          "Check the attributes.")

    # Test parameters from the cfg file.
    server_name = params.get("server_name")
    is_positive = params.get("is_positive") == "yes"
    options_ref = params.get("options_ref")
    nclients_max = params.get("nclients_maxi")
    nclients = params.get("nclients")
    nclients_unauth_max = params.get("nclients_unauth_maxi")
    connect_able = params.get("connect_able")
    options_test_together = params.get("options_test_together")
    # Connection endpoints: the "server" is the local host running the
    # daemon; the "client" is the remote host the virsh sessions come from.
    server_ip = params["server_ip"] = params.get("local_ip")
    server_user = params["server_user"] = params.get("local_user", "root")
    server_pwd = params["server_pwd"] = params.get("local_pwd")
    client_ip = params["client_ip"] = params.get("remote_ip")
    client_pwd = params["client_pwd"] = params.get("remote_pwd")
    # NOTE(review): this overwrites params["server_user"] with the remote
    # user — presumably intended to be params["client_user"]; confirm.
    client_user = params["server_user"] = params.get("remote_user", "root")
    tls_port = params.get("tls_port", "16514")
    tls_uri = "qemu+tls://%s:%s/system" % (server_ip, tls_port)
    tls_obj = None
    remote_virsh_dargs = {
        'remote_ip': client_ip,
        'remote_user': client_user,
        'remote_pwd': client_pwd,
        'uri': tls_uri,
        'ssh_remote_auth': True
    }

    if not server_name:
        server_name = virt_admin.check_server_name()

    config = virt_admin.managed_daemon_config()
    daemon = utils_libvirtd.Libvirtd("virtproxyd")
    virsh_instance = []

    try:
        if nclients:
            # Client connections come in over TLS, so set up the
            # certificates and open the TLS port first.
            tls_obj = TLSConnection(params)
            tls_obj.conn_setup()
            tls_obj.auto_recover = True
            utils_iptables.Firewall_cmd().add_port(tls_port, 'tcp',
                                                   permanent=True)

        if options_ref:
            if "max-clients" in options_ref:
                if nclients:
                    if int(nclients_max) > int(nclients):
                        # Raise the configured limits before connecting, so
                        # all nclients sessions can be established, then set
                        # max-clients at runtime.
                        config.max_clients = nclients
                        config.max_anonymous_clients = nclients_unauth_max
                        daemon.restart()
                        for _ in range(int(nclients)):
                            virsh_instance.append(
                                virsh.VirshPersistent(**remote_virsh_dargs))
                        result = virt_admin.srv_clients_set(
                            server_name, max_clients=nclients_max,
                            ignore_status=True, debug=True)
                    elif int(nclients_max) <= int(nclients):
                        # Connect first, then shrink the limit below the
                        # number of live clients.
                        for _ in range(int(nclients)):
                            virsh_instance.append(
                                virsh.VirshPersistent(**remote_virsh_dargs))
                        result = virt_admin.srv_clients_set(
                            server_name, max_clients=nclients_max,
                            max_unauth_clients=nclients_unauth_max,
                            ignore_status=True, debug=True)
                else:
                    result = virt_admin.srv_clients_set(
                        server_name, max_clients=nclients_max,
                        ignore_status=True, debug=True)
            elif "max-unauth-clients" in options_ref:
                result = virt_admin.srv_clients_set(
                    server_name, max_unauth_clients=nclients_unauth_max,
                    ignore_status=True, debug=True)
        elif options_test_together:
            # Set both limits in a single server-clients-set invocation.
            result = virt_admin.srv_clients_set(
                server_name, max_clients=nclients_max,
                max_unauth_clients=nclients_unauth_max,
                ignore_status=True, debug=True)

        # NOTE(review): 'result' is unbound if neither options_ref nor
        # options_test_together is provided — assumed guaranteed by the
        # cfg variants; confirm.
        outdict = clients_info(server_name)

        if result.exit_status:
            if is_positive:
                test.fail("This operation should success "
                          "but failed! output:\n%s " % result)
            else:
                logging.debug("This failure is expected!")
        else:
            if is_positive:
                if options_ref:
                    if "max-clients" in options_ref:
                        if outdict["nclients_max"] != nclients_max:
                            test.fail("attributes set by server-clients-set "
                                      "is not correct!")
                        if nclients:
                            chk_connect_to_daemon(connect_able)
                    # NOTE(review): checks 'max_unauth_clients' (underscores)
                    # while the branch above matches 'max-unauth-clients'
                    # (hyphens) — verify against the cfg option spelling.
                    elif "max_unauth_clients" in options_ref:
                        if outdict[
                                "nclients_unauth_max"] != nclients_unauth_max:
                            test.fail("attributes set by server-clients-set "
                                      "is not correct!")
                elif options_test_together:
                    if (outdict["nclients_max"] != nclients_max or
                            outdict["nclients_unauth_max"] !=
                            nclients_unauth_max):
                        test.fail("attributes set by server-clients-set "
                                  "is not correct!")
            else:
                test.fail("This is a negative case, should get failure.")
    finally:
        # Close every client session and restore daemon config/firewall.
        for session in virsh_instance:
            session.close_session()
        config.restore()
        daemon.restart()
        utils_iptables.Firewall_cmd().remove_port(tls_port, 'tcp',
                                                  permanent=True)
def run(test, params, env):
    """
    Test virsh migrate command.

    Prepares the guest XML (controllers, pty channel, PPC hpt/htm features,
    disk cache), starts the guest on NFS shared storage, runs the migration
    (optionally async with actions during migration: setspeed, domjobinfo,
    setmaxdowntime, postcopy timeout checks), then verifies the result on
    the target host and recovers the environment.
    """

    def set_feature(vmxml, feature, value):
        """
        Set guest features for PPC

        :param vmxml: guest xml
        :param feature: 'hpt' or 'htm'
        :param value: the feature value to set
        """
        features_xml = vm_xml.VMFeaturesXML()
        if feature == 'hpt':
            features_xml.hpt_resizing = value
        elif feature == 'htm':
            features_xml.htm = value
        vmxml.features = features_xml
        vmxml.sync()

    def trigger_hpt_resize(session):
        """
        Check the HPT order file and dmesg

        :param session: the session to guest
        :raise: test.fail if required message is not found
        """
        hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order"
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        # Bump the order by one to trigger an HPT resize in the guest.
        hpt_order += 1
        cmd = 'echo %d > %s' % (hpt_order, hpt_order_path)
        cmd_result = session.cmd_status_output(cmd)
        result = process.CmdResult(stderr=cmd_result[1],
                                   stdout=cmd_result[1],
                                   exit_status=cmd_result[0])
        libvirt.check_exit_status(result)
        # The resize must be reflected in the guest's dmesg.
        dmesg = session.cmd('dmesg')
        dmesg_content = params.get('dmesg_content').split('|')
        for content in dmesg_content:
            if content % hpt_order not in dmesg:
                test.fail("'%s' is missing in dmesg" % (content % hpt_order))
            else:
                logging.info("'%s' is found in dmesg", content % hpt_order)

    def check_vm_network_accessed(session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param session: The session object to the host
        :raise: test.error when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec."
                      % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)
        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'"
                        % (command, option))

    def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"):
        """
        Add multiple devices

        :param dev_type: the type of the device to be added
        :param dev_index: the maximum index of the device to be added
        :param dev_model: the model of the device to be added
        """
        # Add controllers with indexes 0..dev_index inclusive.
        for inx in range(0, int(dev_index) + 1):
            newcontroller = Controller("controller")
            newcontroller.type = dev_type
            newcontroller.index = inx
            newcontroller.model = dev_model
            logging.debug("New device is added:\n%s", newcontroller)
            vm_xml.add_device(newcontroller)
        vm_xml.sync()

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters

        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line
        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
        virsh_args.update({"ignore_status": True})
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res
        if vm.is_alive():  # vm.connect_uri was updated
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)
        # Throws exception if console shows panic message
        vm.verify_kernel_crash()
        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                     'server_pwd': server_pwd}
        remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing the uni-direction migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)
            if vm.is_persistent():
                vm.undefine()
        except Exception as detail:
            # Best-effort cleanup: log and continue so the original test
            # outcome is not hidden by teardown failures.
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        stress_args = params.get("stress_args", "--cpu 8 --io 4 "
                                 "--vm 2 --vm-bytes 128M "
                                 "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            # The stress command may be interrupted by the migration;
            # treat failures as non-fatal.
            logging.debug(detail)

    def control_migrate_speed(to_speed=1):
        """
        Control migration duration

        :param to_speed: the speed value in Mbps to be set for migration
        :return int: the new migration speed after setting
        """
        virsh_args.update({"ignore_status": False})
        old_speed = virsh.migrate_getspeed(vm_name, **virsh_args)
        logging.debug("Current migration speed is %s MiB/s\n",
                      old_speed.stdout.strip())
        logging.debug("Set migration speed to %d MiB/s\n", to_speed)
        cmd_result = virsh.migrate_setspeed(vm_name, to_speed, "",
                                            **virsh_args)
        actual_speed = virsh.migrate_getspeed(vm_name, **virsh_args)
        logging.debug("New migration speed is %s MiB/s\n",
                      actual_speed.stdout.strip())
        return int(actual_speed.stdout.strip())

    def check_setspeed(params):
        """
        Set/get migration speed

        :param params: the parameters used
        :raise: test.fail if speed set does not take effect
        """
        # Default of 41943040 bytes converts to 40 MiB/s.
        expected_value = int(params.get("migrate_speed",
                                        '41943040')) // (1024 * 1024)
        actual_value = control_migrate_speed(to_speed=expected_value)
        params.update({'compare_to_value': actual_value})
        if actual_value != expected_value:
            test.fail("Migration speed is expected to be '%d MiB/s', "
                      "but '%d MiB/s' "
                      "found" % (expected_value, actual_value))

    def check_domjobinfo(params, option=""):
        """
        Check given item in domjobinfo of the guest is as expected

        :param params: the parameters used
        :param option: options for domjobinfo
        :raise: test.fail if the value of given item is unexpected
        """
        def search_jobinfo(jobinfo):
            """
            Find value of given item in domjobinfo

            :param jobinfo: cmdResult object
            :raise: test.fail if not found
            """
            for item in jobinfo.stdout.splitlines():
                if item.count(jobinfo_item):
                    groups = re.findall(r'[0-9.]+', item.strip())
                    logging.debug("In '%s' search '%s'\n", item, groups[0])
                    # NOTE(review): '//' floor-divides the absolute
                    # difference by the reference value; a true relative
                    # difference would use '/' — confirm the intent.
                    if (math.fabs(float(groups[0]) -
                                  float(compare_to_value)) //
                            float(compare_to_value) > diff_rate):
                        test.fail("{} {} has too much difference from "
                                  "{}".format(jobinfo_item,
                                              groups[0],
                                              compare_to_value))
                    break

        jobinfo_item = params.get("jobinfo_item")
        compare_to_value = params.get("compare_to_value")
        logging.debug("compare_to_value:%s", compare_to_value)
        diff_rate = float(params.get("diff_rate", "0"))
        if not jobinfo_item or not compare_to_value:
            return
        vm_ref = '{}{}'.format(vm_name, option)
        jobinfo = virsh.domjobinfo(vm_ref, **virsh_args)
        search_jobinfo(jobinfo)
        # Optionally verify the same item on the target host.
        check_domjobinfo_remote = params.get("check_domjobinfo_remote")
        if check_domjobinfo_remote:
            remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
            jobinfo = remote_virsh_session.domjobinfo(vm_ref, **virsh_args)
            search_jobinfo(jobinfo)
            remote_virsh_session.close_session()

    def check_maxdowntime(params):
        """
        Set/get migration maxdowntime

        :param params: the parameters used
        :raise: test.fail if maxdowntime set does not take effect
        """
        # Param is in seconds (float); virsh expects milliseconds.
        expected_value = int(float(params.get("migrate_maxdowntime",
                                              '0.3')) * 1000)
        virsh_args.update({"ignore_status": False})
        old_value = int(virsh.migrate_getmaxdowntime(vm_name).stdout.strip())
        logging.debug("Current migration maxdowntime is %d ms", old_value)
        logging.debug("Set migration maxdowntime to %d ms", expected_value)
        virsh.migrate_setmaxdowntime(vm_name, expected_value, **virsh_args)
        actual_value = int(
            virsh.migrate_getmaxdowntime(vm_name).stdout.strip())
        logging.debug("New migration maxdowntime is %d ms", actual_value)
        if actual_value != expected_value:
            test.fail("Migration maxdowntime is expected to be '%d ms', "
                      "but '%d ms' "
                      "found" % (expected_value, actual_value))
        params.update({'compare_to_value': actual_value})

    def do_actions_during_migrate(params):
        """
        The entry point to execute action list during migration

        :param params: the parameters used
        """
        actions_during_migration = params.get("actions_during_migration")
        if not actions_during_migration:
            return
        for action in actions_during_migration.split(","):
            if action == 'setspeed':
                check_setspeed(params)
            elif action == 'domjobinfo':
                check_domjobinfo(params)
            elif action == 'setmaxdowntime':
                check_maxdowntime(params)
            time.sleep(3)

    def attach_channel_xml():
        """
        Create channel xml and attach it to guest configuration
        """
        # Check if pty channel exists already
        for elem in new_xml.devices.by_device_tag('channel'):
            if elem.type_name == channel_type_name:
                logging.debug("{0} channel already exists in guest. "
                              "No need to add new one".format(
                                  channel_type_name))
                return

        params = {'channel_type_name': channel_type_name,
                  'target_type': target_type,
                  'target_name': target_name}
        channel_xml = libvirt.create_channel_xml(params)
        virsh.attach_device(domain_opt=vm_name, file_opt=channel_xml.xml,
                            flagstr="--config", ignore_status=False)
        logging.debug("New VMXML with channel:\n%s", virsh.dumpxml(vm_name))

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected as running.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(
            remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            # NOTE(review): test.fail is given extra positional args here
            # instead of %-formatting the message — confirm whether the
            # message was meant to be formatted with (timeout, vm_state).
            test.fail("After timeout '%s' seconds, "
                      "the vm state on target host should "
                      "be 'running', but '%s' found", timeout, vm_state)
        remote_virsh_session.close_session()

    def get_usable_compress_cache(pagesize):
        """
        Get a number which is bigger than pagesize and is power of two.

        :param pagesize: the given integer
        :return: an integer satisfying the criteria
        """
        def calculate(num):
            # A power of two has a single set bit: num & (num - 1) == 0.
            result = num & (num - 1)
            return (result == 0)

        item = pagesize
        found = False
        while (not found):
            item += 1
            found = calculate(item)
        logging.debug("%d is smallest one that is bigger than '%s' and "
                      "is power of 2", item, pagesize)
        return item

    def check_migration_res(result):
        """
        Check if the migration result is as expected

        :param result: the output of migration
        :raise: test.fail if test is failed
        """
        logging.info("Migration out: %s",
                     results_stdout_52lts(result).strip())
        logging.info("Migration error: %s",
                     results_stderr_52lts(result).strip())

        if status_error:  # Migration should fail
            if err_msg:   # Special error messages are expected
                if not re.search(err_msg,
                                 results_stderr_52lts(result).strip()):
                    test.fail("Can not find the expected patterns '%s' in "
                              "output '%s'"
                              % (err_msg,
                                 results_stderr_52lts(result).strip()))
                else:
                    logging.debug("It is the expected error message")
            else:
                if int(result.exit_status) != 0:
                    logging.debug("Migration failure is expected result")
                else:
                    test.fail("Migration success is unexpected result")
        else:
            if int(result.exit_status) != 0:
                test.fail(results_stderr_52lts(result).strip())

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"debug": True}
    virsh_opt = params.get("virsh_opt", "")
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    config_libvirtd = "yes" == params.get("config_libvirtd", "no")
    contrl_index = params.get("new_contrl_index", None)
    asynch_migration = "yes" == params.get("asynch_migrate", "no")
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    disable_verify_peer = "yes" == params.get("disable_verify_peer", "no")
    status_error = "yes" == params.get("status_error", "no")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    low_speed = params.get("low_speed", None)
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    # PPC-only guest features under test.
    hpt_resize = params.get("hpt_resize", None)
    htm_state = params.get("htm_state", None)

    # For pty channel test
    add_channel = "yes" == params.get("add_channel", "no")
    channel_type_name = params.get("channel_type_name", None)
    target_type = params.get("target_type", None)
    target_name = params.get("target_name", None)
    cmd_run_in_remote_guest = params.get("cmd_run_in_remote_guest", None)
    cmd_run_in_remote_guest_1 = params.get("cmd_run_in_remote_guest_1", None)
    cmd_run_in_remote_host = params.get("cmd_run_in_remote_host", None)
    cmd_run_in_remote_host_1 = params.get("cmd_run_in_remote_host_1", None)
    cmd_run_in_remote_host_2 = params.get("cmd_run_in_remote_host_2", None)

    # For qemu command line checking
    qemu_check = params.get("qemu_check", None)

    xml_check_after_mig = params.get("guest_xml_check_after_mig", None)

    # params for cache matrix test
    cache = params.get("cache")
    remove_cache = "yes" == params.get("remove_cache", "no")
    err_msg = params.get("err_msg")

    arch = platform.machine()
    if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch:
        test.cancel("The case is PPC only.")

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Create a remote runner for later use
        runner_on_target = remote.RemoteRunner(host=server_ip,
                                               username=server_user,
                                               password=server_pwd)

        # Change the configuration files if needed before starting guest
        # For qemu.conf
        if extra.count("--tls"):
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
            if not disable_verify_peer:
                qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
                # Setup qemu configure
                logging.debug("Configure the qemu")
                cleanup_libvirtd_log(log_file)
                qemu_conf = libvirt.customize_libvirt_config(
                    qemu_conf_dict,
                    config_type="qemu",
                    remote_host=True,
                    extra_params=params)
        # Setup libvirtd
        if config_libvirtd:
            logging.debug("Configure the libvirtd")
            cleanup_libvirtd_log(log_file)
            libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict,
                remote_host=True,
                extra_params=params)
        # Prepare required guest xml before starting guest
        if contrl_index:
            new_xml.remove_all_device_by_type('controller')
            logging.debug("After removing controllers, current XML:\n%s\n",
                          new_xml)
            add_ctrls(new_xml, dev_index=contrl_index)

        if add_channel:
            attach_channel_xml()

        if hpt_resize:
            set_feature(new_xml, 'hpt', hpt_resize)

        if htm_state:
            set_feature(new_xml, 'htm', htm_state)

        if cache:
            params["driver_cache"] = cache
        if remove_cache:
            params["enable_cache"] = "no"

        # Change the disk of the vm to shared disk and then start VM
        libvirt.set_vm_disk(vm, params)

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check qemu command line after guest is started
        if qemu_check:
            check_content = qemu_check
            if hpt_resize:
                check_content = "%s%s" % (qemu_check, hpt_resize)
            if htm_state:
                check_content = "%s%s" % (qemu_check, htm_state)
            libvirt.check_qemu_cmd_line(check_content)

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

        # Preparation for the running guest before migration
        if hpt_resize and hpt_resize != 'disabled':
            trigger_hpt_resize(vm_session)

        if low_speed:
            control_migrate_speed(int(low_speed))

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            stress_thread = threading.Thread(target=run_stress_in_vm,
                                             args=())
            stress_thread.start()

        # NOTE(review): 'func_name' is only bound in the two branches
        # below; the async path passes it as func= — assumed the cfg
        # guarantees one of the branches fires for async runs; confirm.
        if extra.count("timeout-postcopy"):
            func_name = check_timeout_postcopy
        if params.get("actions_during_migration"):
            func_name = do_actions_during_migrate
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options:
            extra = "%s %s" % (extra, postcopy_options)

        # Execute migration process
        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            migration_test = libvirt.MigrationTest()
            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                            options, thread_timeout=900,
                                            ignore_status=True,
                                            virsh_opt=virsh_opt,
                                            func=func_name, extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        check_migration_res(mig_result)

        if add_channel:
            # Get the channel device source path of remote guest
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            file_path = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            remote_virsh_session.dumpxml(vm_name, to_file=file_path,
                                         debug=True,
                                         ignore_status=True)
            local_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            local_vmxml.xmltreefile = xml_utils.XMLTreeFile(file_path)
            # NOTE(review): 'host_source' is only bound if a matching
            # channel element exists; the 'if not host_source' below raises
            # NameError otherwise — confirm.
            for elem in local_vmxml.devices.by_device_tag('channel'):
                logging.debug("Found channel device {}".format(elem))
                if elem.type_name == channel_type_name:
                    host_source = elem.source.get('path')
                    logging.debug("Remote guest uses {} for channel device"
                                  .format(host_source))
                    break
            remote_virsh_session.close_session()
            if not host_source:
                test.fail("Can not find source for %s channel on remote host"
                          % channel_type_name)

            # Prepare to wait for message on remote host from the channel
            cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                         'server_pwd': server_pwd}
            cmd_result = remote.run_remote_cmd(
                cmd_run_in_remote_host % host_source,
                cmd_parms, runner_on_target)

            # Send message from remote guest to the channel file
            remote_vm_obj = utils_test.RemoteVMManager(cmd_parms)
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            remote_vm_obj.setup_ssh_auth(vm_ip, vm_pwd)
            cmd_result = remote_vm_obj.run_command(vm_ip,
                                                   cmd_run_in_remote_guest_1)
            remote_vm_obj.run_command(
                vm_ip,
                cmd_run_in_remote_guest
                % results_stdout_52lts(cmd_result).strip())
            logging.debug("Sending message is done")

            # Check message on remote host from the channel
            remote.run_remote_cmd(cmd_run_in_remote_host_1, cmd_parms,
                                  runner_on_target)
            logging.debug("Receiving message is done")
            remote.run_remote_cmd(cmd_run_in_remote_host_2, cmd_parms,
                                  runner_on_target)

        if check_complete_job:
            opts = " --completed"
            check_virsh_command_and_option("domjobinfo", opts)
            if extra.count("comp-xbzrle-cache"):
                params.update({'compare_to_value': cache // 1024})
            check_domjobinfo(params, option=opts)

        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())
        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            cmd_parms = {'server_ip': server_ip, 'server_user': server_user,
                         'server_pwd': server_pwd}
            remote.run_remote_cmd(cmd, cmd_parms, runner_on_target)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = results_stdout_52lts(
                remote_virsh_session.dumpxml(vm_name,
                                             debug=True,
                                             ignore_status=True)).strip()
            if hpt_resize:
                check_str = hpt_resize
            elif htm_state:
                check_str = htm_state
            if hpt_resize or htm_state:
                xml_check_after_mig = "%s'%s'" % (xml_check_after_mig,
                                                  check_str)
                if not re.search(xml_check_after_mig, target_guest_dumpxml):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' in target guest XML:\n%s"
                              % (xml_check_after_mig, target_guest_dumpxml))

            if contrl_index:
                all_ctrls = re.findall(xml_check_after_mig,
                                       target_guest_dumpxml)
                if len(all_ctrls) != int(contrl_index) + 1:
                    remote_virsh_session.close_session()
                    test.fail("%s pci-root controllers are expected in "
                              "guest XML, "
                              "but found %s"
                              % (int(contrl_index) + 1, len(all_ctrls)))
            remote_virsh_session.close_session()

        if int(mig_result.exit_status) == 0:
            server_session = remote.wait_for_login('ssh', server_ip, '22',
                                                   server_user, server_pwd,
                                                   r"[\#\$]\s*$")
            check_vm_network_accessed(server_session)
            server_session.close()
    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s",
                          orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if extra.count("--tls") and not disable_verify_peer:
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            if config_libvirtd:
                logging.debug("Recover the libvirtd configuration")
                libvirt.customize_libvirt_config(None,
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=libvirtd_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                for obj in objs_list:
                    logging.debug("Clean up local objs")
                    del obj

        except Exception as exception_detail:
            if (not test_exception and not is_TestError and
                    not is_TestFail and not is_TestSkip):
                raise exception_detail
            else:
                # if any of above exceptions has been raised, only print
                # error log here to avoid of hiding the original issue
                logging.error(exception_detail)
    # Check result
    if is_TestFail:
        test.fail(test_exception)
    if is_TestSkip:
        test.cancel(test_exception)
    if is_TestError:
        test.error(test_exception)
    if not test_exception:
        logging.info("Case execution is done.")
    else:
        test.error(test_exception)
def run(test, params, env):
    """
    Test virsh migrate command.
    """

    def set_feature(vmxml, feature, value):
        """
        Set guest features for PPC

        :param vmxml: guest xml to modify
        :param feature: the feature name, 'hpt' or 'htm'
        :param value: the feature value to set
        """
        features_xml = vm_xml.VMFeaturesXML()
        if feature == 'hpt':
            features_xml.hpt_resizing = value
        elif feature == 'htm':
            features_xml.htm = value
        vmxml.features = features_xml
        vmxml.sync()

    def trigger_hpt_resize(session):
        """
        Check the HPT order file and dmesg

        :param session: the session to guest

        :raise: test.fail if required message is not found
        """
        hpt_order_path = "/sys/kernel/debug/powerpc/hpt_order"
        hpt_order = session.cmd_output('cat %s' % hpt_order_path).strip()
        hpt_order = int(hpt_order)
        logging.info('Current hpt_order is %d', hpt_order)
        # Bump the order by one to trigger an actual HPT resize in the guest
        hpt_order += 1
        cmd = 'echo %d > %s' % (hpt_order, hpt_order_path)
        cmd_result = session.cmd_status_output(cmd)
        result = process.CmdResult(stderr=cmd_result[1],
                                   stdout=cmd_result[1],
                                   exit_status=cmd_result[0])
        libvirt.check_exit_status(result)
        dmesg = session.cmd('dmesg')
        dmesg_content = params.get('dmesg_content').split('|')
        for content in dmesg_content:
            if content % hpt_order not in dmesg:
                test.fail("'%s' is missing in dmesg" % (content % hpt_order))
            else:
                logging.info("'%s' is found in dmesg", content % hpt_order)

    def check_qemu_cmd_line(content, err_ignore=False):
        """
        Check the specified content in the qemu command line

        :param content: the desired string to search
        :param err_ignore: True to return False when fail
                           False to raise exception when fail

        :return: True if exist, False otherwise
        """
        cmd = 'ps -ef|grep qemu|grep -v grep'
        qemu_line = results_stdout_52lts(process.run(cmd, shell=True))
        if content not in qemu_line:
            if err_ignore:
                return False
            else:
                test.fail("Expected '%s' was not found in "
                          "qemu command line" % content)
        return True

    def check_vm_network_accessed(session=None):
        """
        The operations to the VM need to be done before or after
        migration happens

        :param session: The session object to the host

        :raise: test.error when ping fails
        """
        # Confirm local/remote VM can be accessed through network.
        logging.info("Check VM network connectivity")
        s_ping, _ = utils_test.ping(vm.get_address(),
                                    count=10,
                                    timeout=20,
                                    output_func=logging.debug,
                                    session=session)
        if s_ping != 0:
            if session:
                session.close()
            test.fail("%s did not respond after %d sec." % (vm.name, 20))

    def check_virsh_command_and_option(command, option=None):
        """
        Check if virsh command exists

        :param command: the command to be checked
        :param option: the command option to be checked
        """
        msg = "This version of libvirt does not support "
        if not virsh.has_help_command(command):
            test.cancel(msg + "virsh command '%s'" % command)

        if option and not virsh.has_command_help_match(command, option):
            test.cancel(msg + "virsh command '%s' with option '%s'"
                        % (command, option))

    def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"):
        """
        Add multiple devices

        :param dev_type: the type of the device to be added
        :param dev_index: the maximum index of the device to be added
        :param dev_model: the model of the device to be added
        """
        # Indexes 0..dev_index inclusive, one controller per index
        for inx in range(0, int(dev_index) + 1):
            newcontroller = Controller("controller")
            newcontroller.type = dev_type
            newcontroller.index = inx
            newcontroller.model = dev_model
            logging.debug("New device is added:\n%s", newcontroller)
            vm_xml.add_device(newcontroller)
        vm_xml.sync()

    def do_migration(vm, dest_uri, options, extra):
        """
        Execute the migration with given parameters

        :param vm: the guest to be migrated
        :param dest_uri: the destination uri for migration
        :param options: options next to 'migrate' command
        :param extra: options in the end of the migrate command line

        :return: CmdResult object
        """
        logging.info("Sleeping 10 seconds before migration")
        time.sleep(10)
        # Migrate the guest.
        migration_res = vm.migrate(dest_uri, options, extra, **virsh_args)
        logging.info("Migration out: %s",
                     results_stdout_52lts(migration_res).strip())
        logging.info("Migration error: %s",
                     results_stderr_52lts(migration_res).strip())
        if int(migration_res.exit_status) != 0:
            logging.error("Migration failed for %s.", vm_name)
            return migration_res

        # vm.connect_uri was updated by migrate(), so is_alive() checks
        # the destination side here
        if vm.is_alive():
            logging.info("VM is alive on destination %s.", dest_uri)
        else:
            test.fail("VM is not alive on destination %s" % dest_uri)

        # Throws exception if console shows panic message
        vm.verify_kernel_crash()

        return migration_res

    def cleanup_libvirtd_log(log_file):
        """
        Remove existing libvirtd log file on source and target host.

        :param log_file: log file with absolute path
        """
        if os.path.exists(log_file):
            logging.debug("Delete local libvirt log file '%s'", log_file)
            os.remove(log_file)
        cmd = "rm -f %s" % log_file
        logging.debug("Delete remote libvirt log file '%s'", log_file)
        run_remote_cmd(cmd)

    def cleanup_dest(vm):
        """
        Clean up the destination host environment
        when doing the uni-direction migration.

        :param vm: the guest to be cleaned up
        """
        logging.info("Cleaning up VMs on %s", vm.connect_uri)
        try:
            if virsh.domain_exists(vm.name, uri=vm.connect_uri):
                vm_state = vm.state()
                # Domain must be running before destroy() can take effect
                if vm_state == "paused":
                    vm.resume()
                elif vm_state == "shut off":
                    vm.start()
                vm.destroy(gracefully=False)

                if vm.is_persistent():
                    vm.undefine()

        except Exception as detail:
            # Best-effort cleanup: never let destination cleanup break the test
            logging.error("Cleaning up destination failed.\n%s", detail)

    def run_remote_cmd(cmd):
        """
        A function to run a command on remote host.

        :param cmd: the command to be executed

        :return: CmdResult object
        """
        remote_runner = remote.RemoteRunner(host=server_ip,
                                            username=server_user,
                                            password=server_pwd)
        cmdResult = remote_runner.run(cmd, ignore_status=True)
        if cmdResult.exit_status:
            test.fail("Failed to run '%s' on remote: %s"
                      % (cmd, results_stderr_52lts(cmdResult).strip()))
        return cmdResult

    def run_stress_in_vm():
        """
        The function to load stress in VM
        """
        # NOTE: relies on the enclosing scope's vm_session being logged in
        stress_args = params.get("stress_args", "--cpu 8 --io 4 "
                                 "--vm 2 --vm-bytes 128M "
                                 "--timeout 20s")
        try:
            vm_session.cmd('stress %s' % stress_args)
        except Exception as detail:
            # Stress is best-effort background load; failures are only logged
            logging.debug(detail)

    def check_timeout_postcopy(params):
        """
        Check the vm state on target host after timeout
        when --postcopy and --timeout-postcopy are used.
        The vm state is expected as running.

        :param params: the parameters used
        """
        timeout = int(params.get("timeout_postcopy", 10))
        time.sleep(timeout + 1)
        remote_virsh_session = virsh.VirshPersistent(**remote_virsh_dargs)
        vm_state = results_stdout_52lts(
            remote_virsh_session.domstate(vm_name)).strip()
        if vm_state != "running":
            remote_virsh_session.close_session()
            # NOTE(review): test.fail() is being passed logging-style extra
            # positional args; this likely raises TypeError rather than
            # formatting the message — confirm and switch to %-formatting.
            test.fail("After timeout '%s' seconds, "
                      "the vm state on target host should "
                      "be 'running', but '%s' found",
                      timeout, vm_state)
        remote_virsh_session.close_session()

    def get_usable_compress_cache(pagesize):
        """
        Get a number which is bigger than pagesize and is power of two.

        :param pagesize: the given integer

        :return: an integer satisfying the criteria
        """
        def calculate(num):
            # num is a power of two iff num & (num - 1) == 0
            result = num & (num - 1)
            return (result == 0)

        item = pagesize
        found = False
        while (not found):
            item += 1
            found = calculate(item)
        logging.debug("%d is smallest one that is bigger than '%s' and "
                      "is power of 2", item, pagesize)
        return item

    check_parameters(test, params)

    # Params for NFS shared storage
    shared_storage = params.get("migrate_shared_storage", "")
    if shared_storage == "":
        default_guest_asset = defaults.get_default_guest_os_info()['asset']
        default_guest_asset = "%s.qcow2" % default_guest_asset
        shared_storage = os.path.join(params.get("nfs_mount_dir"),
                                      default_guest_asset)
        logging.debug("shared_storage:%s", shared_storage)

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    # Params to update disk using shared storage
    params["disk_type"] = "file"
    params["disk_source_protocol"] = "netfs"
    params["mnt_path_name"] = params.get("nfs_mount_dir")

    # Local variables
    virsh_args = {"ignore_status": True, "debug": True}
    server_ip = params.get("server_ip")
    server_user = params.get("server_user", "root")
    server_pwd = params.get("server_pwd")
    extra = params.get("virsh_migrate_extra")
    options = params.get("virsh_migrate_options")
    src_uri = params.get("virsh_migrate_connect_uri")
    dest_uri = params.get("virsh_migrate_desturi")
    log_file = params.get("libvirt_log", "/var/log/libvirt/libvirtd.log")
    check_complete_job = "yes" == params.get("check_complete_job", "no")
    config_libvirtd = "yes" == params.get("config_libvirtd", "no")
    contrl_index = params.get("new_contrl_index", None)
    grep_str_remote_log = params.get("grep_str_remote_log", "")
    grep_str_local_log = params.get("grep_str_local_log", "")
    stress_in_vm = "yes" == params.get("stress_in_vm", "no")
    remote_virsh_dargs = {'remote_ip': server_ip, 'remote_user': server_user,
                          'remote_pwd': server_pwd, 'unprivileged_user': None,
                          'ssh_remote_auth': True}

    hpt_resize = params.get("hpt_resize", None)
    htm_state = params.get("htm_state", None)
    qemu_check = params.get("qemu_check", None)
    xml_check_after_mig = params.get("guest_xml_check_after_mig", None)

    # HPT resize / HTM / controller-index scenarios are PPC-specific
    arch = platform.machine()
    if any([hpt_resize, contrl_index, htm_state]) and 'ppc64' not in arch:
        test.cancel("The case is PPC only.")

    # For TLS
    tls_recovery = params.get("tls_auto_recovery", "yes")
    # qemu config
    qemu_conf_dict = None
    # libvirtd config
    libvirtd_conf_dict = None

    remote_virsh_session = None
    vm = None
    vm_session = None
    libvirtd_conf = None
    qemu_conf = None
    mig_result = None
    test_exception = None
    is_TestError = False
    is_TestFail = False
    is_TestSkip = False
    asynch_migration = False

    # Objects to be cleaned up in the end
    objs_list = []
    tls_obj = None

    # Local variables
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    # For safety reasons, we'd better back up xmlfile.
    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()
    if not orig_config_xml:
        test.error("Backing up xmlfile failed.")

    try:
        # Change VM xml in below part
        if contrl_index:
            new_xml.remove_all_device_by_type('controller')
            logging.debug("After removing controllers, current XML:\n%s\n",
                          new_xml)
            add_ctrls(new_xml, dev_index=contrl_index)

        if extra.count("--tls"):
            qemu_conf_dict = {"migrate_tls_x509_verify": "1"}
            # Setup TLS
            tls_obj = TLSConnection(params)
            if tls_recovery == "yes":
                objs_list.append(tls_obj)
                tls_obj.auto_recover = True
                tls_obj.conn_setup()
            # Setup qemu configure
            logging.debug("Configure the qemu")
            cleanup_libvirtd_log(log_file)
            qemu_conf = libvirt.customize_libvirt_config(qemu_conf_dict,
                                                         config_type="qemu",
                                                         remote_host=True,
                                                         extra_params=params)
        # Setup libvirtd
        if config_libvirtd:
            logging.debug("Configure the libvirtd")
            cleanup_libvirtd_log(log_file)
            libvirtd_conf_dict = setup_libvirtd_conf_dict(params)
            libvirtd_conf = libvirt.customize_libvirt_config(
                libvirtd_conf_dict, remote_host=True, extra_params=params)
        if hpt_resize:
            set_feature(new_xml, 'hpt', hpt_resize)

        if htm_state:
            set_feature(new_xml, 'htm', htm_state)

        # Change the disk of the vm to shared disk and then start VM
        libvirt.set_vm_disk(vm, params)
        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        if qemu_check:
            check_content = qemu_check
            if hpt_resize:
                check_content = "%s%s" % (qemu_check, hpt_resize)
            if htm_state:
                check_content = "%s%s" % (qemu_check, htm_state)
            check_qemu_cmd_line(check_content)

        vm_session = vm.wait_for_login()
        check_vm_network_accessed()

        if hpt_resize and hpt_resize != 'disabled':
            trigger_hpt_resize(vm_session)

        if stress_in_vm:
            pkg_name = 'stress'
            logging.debug("Check if stress tool is installed")
            pkg_mgr = utils_package.package_manager(vm_session, pkg_name)
            if not pkg_mgr.is_installed(pkg_name):
                logging.debug("Stress tool will be installed")
                if not pkg_mgr.install():
                    test.error("Package '%s' installation fails" % pkg_name)

            # Run stress in the background so it overlaps the migration
            stress_thread = threading.Thread(target=run_stress_in_vm,
                                             args=())
            stress_thread.start()

        if extra.count("timeout-postcopy"):
            asynch_migration = True
            func_name = check_timeout_postcopy
        if extra.count("comp-xbzrle-cache"):
            cache = get_usable_compress_cache(memory.get_page_size())
            extra = "%s %s" % (extra, cache)

        # For --postcopy enable
        postcopy_options = params.get("postcopy_options")
        if postcopy_options and not extra.count(postcopy_options):
            extra = "%s %s" % (extra, postcopy_options)

        if not asynch_migration:
            mig_result = do_migration(vm, dest_uri, options, extra)
        else:
            # Threaded migration so func_name can run concurrently
            migration_test = libvirt.MigrationTest()
            logging.debug("vm.connect_uri=%s", vm.connect_uri)
            vms = [vm]
            try:
                migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                            options, thread_timeout=900,
                                            ignore_status=True,
                                            func=func_name, extra_opts=extra,
                                            func_params=params)
                mig_result = migration_test.ret
            except exceptions.TestFail as fail_detail:
                test.fail(fail_detail)
            except exceptions.TestSkipError as skip_detail:
                test.cancel(skip_detail)
            except exceptions.TestError as error_detail:
                test.error(error_detail)
            except Exception as details:
                mig_result = migration_test.ret
                logging.error(details)

        if int(mig_result.exit_status) != 0:
            test.fail(results_stderr_52lts(mig_result).strip())

        if check_complete_job:
            search_str_domjobinfo = params.get("search_str_domjobinfo", None)
            opts = "--completed"
            args = vm_name + " " + opts
            check_virsh_command_and_option("domjobinfo", opts)
            jobinfo = results_stdout_52lts(
                virsh.domjobinfo(args, debug=True, ignore_status=True)).strip()
            logging.debug("Local job info on completion:\n%s", jobinfo)
            if extra.count("comp-xbzrle-cache") and search_str_domjobinfo:
                # domjobinfo reports the xbzrle cache in KiB
                search_str_domjobinfo = "%s %s" % (search_str_domjobinfo,
                                                   cache // 1024)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    test.fail("Fail to search '%s' on local:\n%s"
                              % (search_str_domjobinfo, jobinfo))
            # Check remote host
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            jobinfo = results_stdout_52lts(
                remote_virsh_session.domjobinfo(args, debug=True,
                                                ignore_status=True)).strip()
            logging.debug("Remote job info on completion:\n%s", jobinfo)
            if search_str_domjobinfo:
                if not re.search(search_str_domjobinfo, jobinfo):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' on remote:\n%s"
                              % (search_str_domjobinfo, jobinfo))
            remote_virsh_session.close_session()

        if grep_str_local_log:
            cmd = "grep -E '%s' %s" % (grep_str_local_log, log_file)
            cmdRes = process.run(cmd, shell=True, ignore_status=True)
            if cmdRes.exit_status:
                test.fail(results_stderr_52lts(cmdRes).strip())
        if grep_str_remote_log:
            cmd = "grep -E '%s' %s" % (grep_str_remote_log, log_file)
            run_remote_cmd(cmd)

        if xml_check_after_mig:
            if not remote_virsh_session:
                remote_virsh_session = virsh.VirshPersistent(
                    **remote_virsh_dargs)
            target_guest_dumpxml = results_stdout_52lts(
                remote_virsh_session.dumpxml(vm_name,
                                             debug=True,
                                             ignore_status=True)).strip()
            if hpt_resize:
                check_str = hpt_resize
            elif htm_state:
                check_str = htm_state
            if hpt_resize or htm_state:
                xml_check_after_mig = "%s'%s'" % (xml_check_after_mig,
                                                  check_str)
                if not re.search(xml_check_after_mig, target_guest_dumpxml):
                    remote_virsh_session.close_session()
                    test.fail("Fail to search '%s' in target guest XML:\n%s"
                              % (xml_check_after_mig, target_guest_dumpxml))

            if contrl_index:
                all_ctrls = re.findall(xml_check_after_mig,
                                       target_guest_dumpxml)
                if len(all_ctrls) != int(contrl_index) + 1:
                    remote_virsh_session.close_session()
                    test.fail(
                        "%s pci-root controllers are expected in guest XML, "
                        "but found %s" % (int(contrl_index) + 1,
                                          len(all_ctrls)))
            remote_virsh_session.close_session()

        server_session = remote.wait_for_login('ssh', server_ip, '22',
                                               server_user, server_pwd,
                                               r"[\#\$]\s*$")
        check_vm_network_accessed(server_session)
        server_session.close()
    except exceptions.TestFail as details:
        is_TestFail = True
        test_exception = details
    except exceptions.TestSkipError as details:
        is_TestSkip = True
        test_exception = details
    except exceptions.TestError as details:
        is_TestError = True
        test_exception = details
    except Exception as details:
        test_exception = details
    finally:
        logging.debug("Recover test environment")
        try:
            # Clean VM on destination
            vm.connect_uri = dest_uri
            cleanup_dest(vm)
            vm.connect_uri = src_uri

            logging.info("Recovery VM XML configration")
            orig_config_xml.sync()
            logging.debug("The current VM XML:\n%s",
                          orig_config_xml.xmltreefile)

            if remote_virsh_session:
                remote_virsh_session.close_session()

            if extra.count("--tls"):
                logging.debug("Recover the qemu configuration")
                libvirt.customize_libvirt_config(None,
                                                 config_type="qemu",
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=qemu_conf)

            if config_libvirtd:
                logging.debug("Recover the libvirtd configuration")
                libvirt.customize_libvirt_config(None,
                                                 remote_host=True,
                                                 extra_params=params,
                                                 is_recover=True,
                                                 config_object=libvirtd_conf)

            logging.info("Remove local NFS image")
            source_file = params.get("source_file")
            libvirt.delete_local_disk("file", path=source_file)

            if objs_list:
                for obj in objs_list:
                    logging.debug("Clean up local objs")
                    del obj

        except Exception as exception_detail:
            if (not test_exception and not is_TestError and
                    not is_TestFail and not is_TestSkip):
                raise exception_detail
            else:
                # if any of above exceptions has been raised, only print
                # error log here to avoid of hiding the original issue
                logging.error(exception_detail)
    # Check result
    if is_TestFail:
        test.fail(test_exception)
    if is_TestSkip:
        test.cancel(test_exception)
    if is_TestError:
        test.error(test_exception)
    if not test_exception:
        logging.info("Case execution is done.")
    else:
        test.error(test_exception)
def run(test, params, env):
    """
    Test storage migration
    1) Do storage migration(copy-storage-all/copy-storage-inc) with TLS
       encryption - NBD transport
    2) Cancel storage migration with TLS encryption
    3) Copy only the top image for storage migration with backing chain

    :param test: test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def prepare_nfs_backingfile(vm, params):
        """
        Create an image using nfs type backing_file

        :param vm: The guest
        :param params: the parameters used
        """
        mnt_path_name = params.get("nfs_mount_dir", "nfs-mount")
        exp_opt = params.get("export_options", "rw,no_root_squash,fsid=0")
        exp_dir = params.get("export_dir", "nfs-export")
        backingfile_img = params.get("source_dist_img", "nfs-img")
        disk_format = params.get("disk_format", "qcow2")
        img_name = params.get("img_name", "test.img")
        precreation = "yes" == params.get("precreation", "yes")
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        disk_xml = vmxml.devices.by_device_tag('disk')[0]
        src_disk_format = disk_xml.xmltreefile.find('driver').get('type')
        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        disk_img = os.path.join(os.path.dirname(blk_source), img_name)

        res = libvirt.setup_or_cleanup_nfs(True, mnt_path_name, is_mount=True,
                                           export_options=exp_opt,
                                           export_dir=exp_dir)
        mnt_path = res["mount_dir"]
        params["selinux_status_bak"] = res["selinux_status_bak"]

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Convert the original disk into a backing file on the NFS export
        disk_cmd = ("qemu-img convert -f %s -O %s %s %s/%s" %
                    (src_disk_format, disk_format, blk_source,
                     mnt_path, backingfile_img))
        process.run(disk_cmd, ignore_status=False, verbose=True)
        local_image_list.append("%s/%s" % (mnt_path, backingfile_img))

        logging.debug("Create a local image backing on NFS.")
        disk_cmd = ("qemu-img create -f %s -b %s/%s %s" %
                    (disk_format, mnt_path, backingfile_img, disk_img))
        process.run(disk_cmd, ignore_status=False, verbose=True)
        local_image_list.append(disk_img)

        if precreation:
            logging.debug("Create an image backing on NFS on remote host.")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            status, stdout = utils_misc.cmd_status_output(
                disk_cmd, session=remote_session)
            logging.debug("status: {}, stdout: {}".format(status, stdout))
            remote_image_list.append("%s/%s" % (mnt_path, backingfile_img))
            remote_image_list.append(disk_img)
            remote_session.close()

        # Point the guest at the newly created top image
        params.update({'disk_source_name': disk_img,
                       'disk_type': 'file',
                       'disk_source_protocol': 'file'})
        libvirt.set_vm_disk(vm, params)

    migration_test = migration.MigrationTest()
    migration_test.check_parameters(params)

    # Local variables
    server_ip = params["server_ip"] = params.get("remote_ip")
    server_user = params["server_user"] = params.get("remote_user", "root")
    server_pwd = params["server_pwd"] = params.get("remote_pwd")
    client_ip = params["client_ip"] = params.get("local_ip")
    client_pwd = params["client_pwd"] = params.get("local_pwd")
    virsh_options = params.get("virsh_options", "")
    copy_storage_option = params.get("copy_storage_option")
    extra = params.get("virsh_migrate_extra", "")
    options = params.get("virsh_migrate_options", "--live --verbose")
    backingfile_type = params.get("backingfile_type")
    check_str_local_log = params.get("check_str_local_log", "")
    disk_format = params.get("disk_format", "qcow2")
    log_file = params.get("log_outputs", "/var/log/libvirt/libvirtd.log")
    # NOTE(review): eval() on a test parameter — the value comes from the
    # test config, but ast.literal_eval would be safer; confirm.
    daemon_conf_dict = eval(params.get("daemon_conf_dict", '{}'))
    cancel_migration = "yes" == params.get("cancel_migration", "no")
    migrate_again = "yes" == params.get("migrate_again", "no")
    precreation = "yes" == params.get("precreation", "yes")
    tls_recovery = "yes" == params.get("tls_auto_recovery", "yes")
    func_params_exists = "yes" == params.get("func_params_exists", "no")
    status_error = "yes" == params.get("status_error", "no")

    local_image_list = []
    remote_image_list = []
    tls_obj = None

    func_name = None
    daemon_conf = None
    mig_result = None
    remote_session = None
    vm_session = None

    # params for migration connection
    params["virsh_migrate_desturi"] = libvirt_vm.complete_uri(
        params.get("migrate_dest_host"))
    dest_uri = params.get("virsh_migrate_desturi")
    vm_name = params.get("migrate_main_vm")
    vm = env.get_vm(vm_name)
    vm.verify_alive()

    extra = "{} {}".format(extra, copy_storage_option)

    extra_args = {}
    if func_params_exists:
        extra_args.update({'func_params': params})
    if cancel_migration:
        func_name = migration_test.do_cancel

    # For safety reasons, we'd better back up xmlfile.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    try:
        if backingfile_type:
            if backingfile_type == "nfs":
                prepare_nfs_backingfile(vm, params)

        if extra.count("copy-storage-all") and precreation:
            # Pre-create the destination disk with the same virtual size
            blk_source = vm.get_first_disk_devices()['source']
            vsize = utils_misc.get_image_info(blk_source).get("vsize")
            remote_session = remote.remote_login("ssh", server_ip, "22",
                                                 server_user, server_pwd,
                                                 r'[$#%]')
            utils_misc.make_dirs(os.path.dirname(blk_source), remote_session)
            disk_cmd = ("qemu-img create -f %s %s %s" %
                        (disk_format, blk_source, vsize))
            status, stdout = utils_misc.cmd_status_output(
                disk_cmd, session=remote_session)
            logging.debug("status: {}, stdout: {}".format(status, stdout))
            remote_image_list.append(blk_source)
            remote_session.close()

        # Update libvirtd configuration
        if daemon_conf_dict:
            if os.path.exists(log_file):
                os.remove(log_file)
            daemon_conf = libvirt.customize_libvirt_config(daemon_conf_dict)

        if extra.count("--tls"):
            tls_obj = TLSConnection(params)
            if tls_recovery:
                tls_obj.auto_recover = True
                tls_obj.conn_setup()

        if not vm.is_alive():
            vm.start()

        logging.debug("Guest xml after starting:\n%s",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))

        # Check local guest network connection before migration
        vm_session = vm.wait_for_login(restart_network=True)
        migration_test.ping_vm(vm, params)

        # Execute migration process
        vms = [vm]

        migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                    options, thread_timeout=900,
                                    ignore_status=True,
                                    virsh_opt=virsh_options,
                                    extra_opts=extra, func=func_name,
                                    **extra_args)

        mig_result = migration_test.ret
        migration_test.check_result(mig_result, params)

        if migrate_again and status_error:
            logging.debug("Sleeping 10 seconds before rerun migration")
            time.sleep(10)
            if cancel_migration:
                # The rerun is expected to succeed, so do not cancel again
                func_name = None
            params["status_error"] = "no"
            migration_test.do_migration(vms, None, dest_uri, 'orderly',
                                        options, thread_timeout=900,
                                        ignore_status=True,
                                        virsh_opt=virsh_options,
                                        extra_opts=extra, func=func_name,
                                        **extra_args)

            mig_result = migration_test.ret
            migration_test.check_result(mig_result, params)
        if int(mig_result.exit_status) == 0:
            migration_test.ping_vm(vm, params, uri=dest_uri)

        if check_str_local_log:
            libvirt.check_logfile(check_str_local_log, log_file)

    finally:
        logging.debug("Recover test environment")
        # Clean VM on destination and source
        try:
            migration_test.cleanup_dest_vm(vm, vm.connect_uri, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        except Exception as err:
            logging.error(err)

        logging.info("Recovery VM XML configration")
        orig_config_xml.sync()

        if daemon_conf:
            logging.debug("Recover the configurations")
            libvirt.customize_libvirt_config(None, is_recover=True,
                                             config_object=daemon_conf)
        if tls_obj:
            logging.debug("Clean up local objs")
            del tls_obj
        for source_file in local_image_list:
            libvirt.delete_local_disk("file", path=source_file)
        for img in remote_image_list:
            remote.run_remote_cmd("rm -rf %s" % img, params)
        if remote_session:
            remote_session.close()
def run(test, params, env):
    """
    Test TLS connection priorities with remote machine.
    """
    server_ip = params.get("server_ip")
    server_user = params.get("server_user")
    server_pwd = params.get("server_pwd")
    tls_port = params.get("tls_port", "16514")
    ssl_v3 = params.get("priority_ssl_v3_only")
    ssl_inv = params.get("priority_ssl_invalid")
    tls_inv = params.get("priority_tls_invalid")
    no_ssl_v3 = params.get("priority_no_ssl_v3")
    wrong_message = params.get("wrong_priorities_message").split(',')
    invalid_message = params.get("invalid_priorities_message").split(',')
    welcome_message = params.get("successful_message").split(',')
    uri = "qemu+tls://{}"
    remote_update_session = None

    # Initial Setup - this part is the longest and always required part of the
    # test and therefore it is not effective to divide this test into smaller
    # parts that would take much longer in sum.

    # Create connection with the server
    server_session = create_session_on_remote(params)
    # Make sure the Libvirtd is running on remote
    remote_libvirtd = utils_libvirtd.Libvirtd(session=server_session)
    if not remote_libvirtd.is_running():
        res = remote_libvirtd.start()
        if not res:
            status, output = server_session.cmd_status_output("journalctl -xe")
            test.error("Failed to start libvirtd on remote. [status]: %s "
                       "[output]: %s." % (status, output))
    # setup TLS
    tls_obj = TLSConnection(params)
    # setup test environment
    tls_obj.conn_setup()
    # Open the tls/tcp listening port on server
    firewall_cmd = utils_iptables.Firewall_cmd(server_session)
    firewall_cmd.add_port(tls_port, 'tcp', permanent=True)
    server_session.close()
    # Check which version to use for TLS v1.X
    ssl_check = process.run(
        'openssl s_client -connect {}:16514 -tls1_1'.format(server_ip),
        ignore_status=True)
    if ssl_check.exit_status:
        logging.debug('TLSv1.1 not supported, use TLS v1.2 instead')
        tls_v1 = params.get("priority_tls_v1_2_only")
    else:
        # Use TLS v1.0
        tls_v1 = params.get("priority_tls_v1_only")
    # Update TLS priority on remote
    replacements = {r'#tls_priority=".*?"':
                    'tls_priority="{}"'.format(params['remote_tls_priority'])}
    # DO NOT DELETE remote_update_session as it destroys changes applied by
    # the method remote_libvirtdconf.sub_else_add - cleanup called when the
    # instance is destroyed
    remote_update_session = change_parameter_on_remote(params, replacements)
    # Restart Remote Libvirtd
    remotely_control_libvirtd(server_ip, server_user, server_pwd, 'restart')
    config = None
    try:
        # Phase I - test via libvirt.conf on client

        # TLS priority set to SSLv3 only for client via libvirt.conf;
        # server does not accept SSLv3-only, so an error is expected
        new_tls_priority = {'tls_priority': '"{}"'.format(ssl_v3)}
        config = change_libvirtconf_on_client(new_tls_priority)
        # Connect to hypervisor
        uri_path = server_ip + '/system'
        connect_dict = {'uri': uri.format(uri_path),
                        'cmd': "",
                        'pass_message_list': wrong_message,
                        'expect_error': True,
                        }
        test_pass = connect_to_server_hypervisor(connect_dict)
        # Restore if pass
        if test_pass:
            config.restore()
            config = None
        else:
            test.fail(
                'TLS priorities test failed for case when client supports'
                ' SSLv3 only and server does not support SSLv3 only.')

        # TLS priority set to TLS1.0 only for client via libvirt.conf
        new_tls_priority = {'tls_priority': '"{}"'.format(tls_v1)}
        config = change_libvirtconf_on_client(new_tls_priority)
        # Remove SSLv3.0 support via URI
        uri_path = server_ip + '/system' + '?tls_priority={}'.format(no_ssl_v3)
        connect_dict['uri'] = uri.format(uri_path)
        connect_dict['pass_message_list'] = welcome_message
        connect_dict['expect_error'] = False
        test_pass = connect_to_server_hypervisor(connect_dict)
        if test_pass:
            config.restore()
            config = None
        else:
            test.fail(
                'TLS priorities test failed for case when client supports'
                ' TLSv1.X only and server does not support SSLv3 only.')

        # Pass invalid SSL priority
        new_tls_priority = {'tls_priority': '"{}"'.format(ssl_inv)}
        config = change_libvirtconf_on_client(new_tls_priority)
        uri_path = server_ip + '/system'
        connect_dict['uri'] = uri.format(uri_path)
        connect_dict['pass_message_list'] = invalid_message
        connect_dict['expect_error'] = True
        test_pass = connect_to_server_hypervisor(connect_dict)
        if test_pass:
            config.restore()
            config = None
        else:
            test.fail(
                'TLS priorities test failed for case when client supports'
                ' SSLv4.0 which is invalid.')

        # Phase II - test via URI on client

        # SSLv3 only requested through the URI instead of libvirt.conf
        uri_path = server_ip + '/system' + '?tls_priority={}'.format(ssl_v3)
        connect_dict['uri'] = uri.format(uri_path)
        connect_dict['pass_message_list'] = wrong_message
        connect_dict['expect_error'] = True
        test_pass = connect_to_server_hypervisor(connect_dict)
        if not test_pass:
            test.fail('TLS priorities test failed for case when the client '
                      'supports SSLv3 only by URI and the server does not '
                      'support SSLv3 only.')

        # TLS v1.X only requested through the URI — expected to succeed
        uri_path = server_ip + '/system' + '?tls_priority={}'.format(tls_v1)
        connect_dict['uri'] = uri.format(uri_path)
        connect_dict['pass_message_list'] = welcome_message
        connect_dict['expect_error'] = False
        test_pass = connect_to_server_hypervisor(connect_dict)
        if not test_pass:
            test.fail('TLS priorities test failed for case when the client '
                      'supports TLSv1.X only by URI and the server does not '
                      'support SSLv3 only.')

        # Invalid priority requested through the URI — error expected
        uri_path = server_ip + '/system' + '?tls_priority={}'.format(tls_inv)
        connect_dict['uri'] = uri.format(uri_path)
        connect_dict['pass_message_list'] = invalid_message
        connect_dict['expect_error'] = True
        test_pass = connect_to_server_hypervisor(connect_dict)
        if not test_pass:
            test.fail('TLS priorities test failed for case when the client '
                      'supports SSLv4.0 by URI which is invalid.')
    finally:
        # Reset Firewall
        server_session = create_session_on_remote(params)
        firewall_cmd = utils_iptables.Firewall_cmd(server_session)
        firewall_cmd.remove_port(tls_port, 'tcp')
        server_session.close()
        if config:
            config.restore()
        # Restore config on remote
        if remote_update_session:
            # Dropping the reference triggers the instance's cleanup, which
            # reverts the remote libvirtd.conf change
            del remote_update_session
def run(test, params, env):
    """
    Test the pull-mode backup function

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. create some data on vdb
    5. start a pull mode incremental backup
    6. repeat step 4 to 5
    7. check the full/incremental backup file data

    :param test: avocado test object, used for cancel/fail/error reporting
    :param params: test parameters dictionary from the cfg file
    :param env: test environment object holding the VM under test
    """
    # Basic case config
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")
    # Read the correctly-spelled key first, but keep the historical
    # typo'd key ("loal_hostname") as a fallback for old cfg files.
    local_hostname = params.get("local_hostname",
                                params.get("loal_hostname", "localhost"))
    local_ip = params.get("local_ip", "127.0.0.1")
    local_user_name = params.get("local_user_name", "root")
    local_user_password = params.get("local_user_password", "redhat")
    tmp_dir = data_dir.get_tmp_dir()
    # Backup config
    scratch_type = params.get("scratch_type", "file")
    reuse_scratch_file = "yes" == params.get("reuse_scratch_file")
    prepare_scratch_file = "yes" == params.get("prepare_scratch_file")
    scratch_blkdev_path = params.get("scratch_blkdev_path")
    scratch_blkdev_size = params.get("scratch_blkdev_size",
                                     original_disk_size)
    prepare_scratch_blkdev = "yes" == params.get("prepare_scratch_blkdev")
    backup_rounds = int(params.get("backup_rounds", 3))
    backup_error = "yes" == params.get("backup_error")
    expect_backup_canceled = "yes" == params.get("expect_backup_canceled")
    # NBD service config
    nbd_protocol = params.get("nbd_protocol", "unix")
    nbd_socket = params.get("nbd_socket", "/tmp/pull_backup.socket")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")
    nbd_hostname = local_hostname
    set_exportname = "yes" == params.get("set_exportname")
    set_exportbitmap = "yes" == params.get("set_exportbitmap")
    # TLS service config
    tls_enabled = "yes" == params.get("tls_enabled")
    tls_x509_verify = "yes" == params.get("tls_x509_verify")
    custom_pki_path = "yes" == params.get("custom_pki_path")
    tls_client_ip = tls_server_ip = local_ip
    tls_client_cn = tls_server_cn = local_hostname
    tls_client_user = tls_server_user = local_user_name
    tls_client_pwd = tls_server_pwd = local_user_password
    tls_provide_client_cert = "yes" == params.get("tls_provide_client_cert")
    tls_error = "yes" == params.get("tls_error")
    # LUKS config
    scratch_luks_encrypted = "yes" == params.get("scratch_luks_encrypted")
    luks_passphrase = params.get("luks_passphrase", "password")

    # Cancel the test if libvirt doesn't support the required features
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")
    if tls_enabled and not libvirt_version.version_compare(6, 6, 0):
        test.cancel("Current libvirt version doesn't support pull mode "
                    "backup with tls nbd.")

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml so it can be restored in the finally block
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        utils_backup.enable_inc_backup_for_vm(vm)

        # Prepare tls env
        if tls_enabled:
            # Prepare pki
            tls_config = {
                "qemu_tls": "yes",
                "auto_recover": "yes",
                "client_ip": tls_client_ip,
                "server_ip": tls_server_ip,
                "client_cn": tls_client_cn,
                "server_cn": tls_server_cn,
                "client_user": tls_client_user,
                "server_user": tls_server_user,
                "client_pwd": tls_client_pwd,
                "server_pwd": tls_server_pwd,
            }
            if custom_pki_path:
                pki_path = os.path.join(tmp_dir, "inc_bkup_pki")
            else:
                pki_path = "/etc/pki/libvirt-backup/"
            if tls_x509_verify:
                tls_config["client_ip"] = tls_client_ip
            tls_config["custom_pki_path"] = pki_path
            tls_obj = TLSConnection(tls_config)
            tls_obj.conn_setup(True, tls_provide_client_cert)
            logging.debug("TLS certs in: %s" % pki_path)
            # Set qemu.conf so qemu's backup NBD server uses our certs
            qemu_config = LibvirtQemuConfig()
            if tls_x509_verify:
                qemu_config.backup_tls_x509_verify = True
            else:
                qemu_config.backup_tls_x509_verify = False
            if custom_pki_path:
                qemu_config.backup_tls_x509_cert_dir = pki_path
            utils_libvirtd.Libvirtd().restart()

        # Prepare libvirt secret for the LUKS-encrypted scratch media
        if scratch_luks_encrypted:
            utils_secret.clean_up_secrets()
            luks_secret_uuid = libvirt.create_secret(params)
            virsh.secret_set_value(luks_secret_uuid, luks_passphrase,
                                   encode=True, debug=True)

        # Prepare the disk to be backuped.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "{}_image.qcow2".format(original_disk_target)
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path,
                                      original_disk_size, "qcow2")
            disk_params = {"device_type": "disk",
                           "type_name": "file",
                           "driver_type": "qcow2",
                           "target_dev": original_disk_target,
                           "source_file": disk_path}
            if original_disk_target:
                disk_params["target_dev"] = original_disk_target
        elif original_disk_type == "iscsi":
            iscsi_host = '127.0.0.1'
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True, is_login=False,
                image_size=original_disk_size, portal_ip=iscsi_host)
            disk_path = ("iscsi://[%s]/%s/%s" %
                         (iscsi_host, iscsi_target, lun_num))
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True, verbose=True)
            disk_params = {'device_type': 'disk',
                           'type_name': 'network',
                           "driver_type": "qcow2",
                           'target_dev': original_disk_target}
            disk_params_src = {'source_protocol': 'iscsi',
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': iscsi_host,
                               'source_host_port': '3260'}
            disk_params.update(disk_params_src)
        elif original_disk_type == "gluster":
            gluster_vol_name = "gluster_vol"
            gluster_pool_name = "gluster_pool"
            gluster_img_name = "gluster.qcow2"
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                is_setup=True,
                vol_name=gluster_vol_name,
                pool_name=gluster_pool_name,
                **params)
            disk_path = 'gluster://%s/%s/%s' % (
                gluster_host_ip, gluster_vol_name, gluster_img_name)
            process.run("qemu-img create -f qcow2 %s %s" %
                        (disk_path, original_disk_size),
                        shell=True, verbose=True)
            disk_params = {'device_type': 'disk',
                           'type_name': 'network',
                           "driver_type": "qcow2",
                           'target_dev': original_disk_target}
            disk_params_src = {'source_protocol': 'gluster',
                               'source_name': (gluster_vol_name +
                                               "/%s" % gluster_img_name),
                               'source_host_name': gluster_host_ip,
                               'source_host_port': '24007'}
            disk_params.update(disk_params_src)
        else:
            # BUGFIX: test.error() takes a single message argument, it does
            # not do logging-style lazy %-formatting; format explicitly.
            test.error("The disk type '%s' not supported in this script."
                       % original_disk_type)
        if hotplug_disk:
            vm.start()
            # Only wait until the guest is reachable; the session itself
            # is not needed here.
            vm.wait_for_login().close()
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm_name, disk_xml, debug=True)
        else:
            disk_xml = libvirt.create_disk_xml(disk_params)
            virsh.attach_device(vm.name, disk_xml, flagstr="--config",
                                debug=True)
            vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_file_list = []
        for backup_index in range(backup_rounds):
            # Prepare backup xml
            backup_params = {"backup_mode": "pull"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = (
                    "checkpoint_" + str(backup_index - 1))
            # Set libvirt default nbd export name and bitmap name
            nbd_export_name = original_disk_target
            nbd_bitmap_name = "backup-" + original_disk_target
            backup_server_dict = {}
            if nbd_protocol == "unix":
                backup_server_dict["transport"] = "unix"
                backup_server_dict["socket"] = nbd_socket
            else:
                backup_server_dict["name"] = nbd_hostname
                backup_server_dict["port"] = nbd_tcp_port
                if tls_enabled:
                    backup_server_dict["tls"] = "yes"
            backup_params["backup_server"] = backup_server_dict
            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = scratch_type
                    # Custom nbd export name and bitmap name if required
                    if set_exportname:
                        nbd_export_name = (original_disk_target
                                           + "_custom_exp")
                        backup_disk_params["exportname"] = nbd_export_name
                    if set_exportbitmap:
                        nbd_bitmap_name = (original_disk_target
                                           + "_custom_bitmap")
                        backup_disk_params["exportbitmap"] = nbd_bitmap_name
                    # Prepare nbd scratch file/dev params
                    scratch_params = {"attrs": {}}
                    scratch_path = None
                    if scratch_type == "file":
                        scratch_file_name = ("scratch_file_%s"
                                             % backup_index)
                        scratch_path = os.path.join(tmp_dir,
                                                    scratch_file_name)
                        if prepare_scratch_file:
                            libvirt.create_local_disk("file", scratch_path,
                                                      original_disk_size,
                                                      "qcow2")
                        scratch_params["attrs"]["file"] = scratch_path
                    elif scratch_type == "block":
                        if prepare_scratch_blkdev:
                            scratch_path = libvirt.setup_or_cleanup_iscsi(
                                is_setup=True,
                                image_size=scratch_blkdev_size)
                        scratch_params["attrs"]["dev"] = scratch_path
                    else:
                        test.fail("We do not support backup scratch type:"
                                  " '%s'" % scratch_type)
                    if scratch_luks_encrypted:
                        encryption_dict = {
                            "encryption": "luks",
                            "secret": {"type": "passphrase",
                                       "uuid": luks_secret_uuid}
                        }
                        scratch_params["encryption"] = encryption_dict
                    logging.debug("scratch params: %s", scratch_params)
                    backup_disk_params["backup_scratch"] = scratch_params
                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(backup_params,
                                                        backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s",
                          backup_index, backup_xml)
            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = (cp_disk_bitmap
                                                   + str(backup_index))
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s",
                          backup_index, checkpoint_xml)
            # Create some data in vdb so each round has fresh dirty blocks
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()
            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml
            if reuse_scratch_file:
                backup_options += " --reuse-external"
            backup_result = virsh.backup_begin(vm_name, backup_options,
                                               ignore_status=True,
                                               debug=True)
            if backup_result.exit_status:
                # Use the text accessor so the exception carries str, not
                # bytes (consistent with stdout_text usage elsewhere).
                raise utils_backup.BackupBeginError(
                    backup_result.stderr_text.strip())
            # If required, do some error operations during backup job
            error_operation = params.get("error_operation")
            if error_operation:
                if "destroy_vm" in error_operation:
                    vm.destroy(gracefully=False)
                if "kill_qemu" in error_operation:
                    utils_misc.safe_kill(vm.get_pid(), signal.SIGKILL)
                if utils_misc.wait_for(
                        lambda: utils_backup.is_backup_canceled(vm_name),
                        timeout=5):
                    raise utils_backup.BackupCanceledError()
                elif expect_backup_canceled:
                    test.fail("Backup job should be canceled but not.")
            backup_file_path = os.path.join(
                tmp_dir, "backup_file_%s.qcow2" % str(backup_index))
            backup_file_list.append(backup_file_path)
            nbd_params = {"nbd_protocol": nbd_protocol,
                          "nbd_export": nbd_export_name}
            if nbd_protocol == "unix":
                nbd_params["nbd_socket"] = nbd_socket
            elif nbd_protocol == "tcp":
                nbd_params["nbd_hostname"] = nbd_hostname
                nbd_params["nbd_tcp_port"] = nbd_tcp_port
                if tls_enabled:
                    nbd_params["tls_dir"] = pki_path
                    nbd_params["tls_server_ip"] = tls_server_ip
            if not is_incremental:
                # Do full backup
                try:
                    utils_backup.pull_full_backup_to_file(nbd_params,
                                                          backup_file_path)
                except Exception as details:
                    if tls_enabled and tls_error:
                        raise utils_backup.BackupTLSError(details)
                    else:
                        test.fail("Fail to get full backup data: %s"
                                  % details)
                logging.debug("Full backup to: %s", backup_file_path)
            else:
                # Do incremental backup
                utils_backup.pull_incremental_backup_to_file(
                    nbd_params, backup_file_path, nbd_bitmap_name,
                    original_disk_size)
            # Check if scratch file encrypted
            if scratch_luks_encrypted and scratch_path:
                cmd = "qemu-img info -U %s" % scratch_path
                result = process.run(
                    cmd, shell=True, verbose=True).stdout_text.strip()
                if (not re.search("format.*luks", result, re.IGNORECASE)
                        or not re.search("encrypted.*yes", result,
                                         re.IGNORECASE)):
                    test.fail("scratch file/dev is not encrypted by LUKS")
            virsh.domjobabort(vm_name, debug=True)
        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name, checkpoint_name, debug=True)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Compare the backup data and original data
        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (
            disk_path, original_data_file)
        process.run(cmd, shell=True, verbose=True)
        for backup_file in backup_file_list:
            if not utils_backup.cmp_backup_data(original_data_file,
                                                backup_file):
                # BUGFIX: add the missing space between the joined string
                # literals so the failure message stays readable.
                test.fail("Backup and original data are not identical for "
                          "'%s' and '%s'" % (disk_path, backup_file))
            else:
                logging.debug("'%s' contains correct backup data",
                              backup_file)
    except utils_backup.BackupBeginError as detail:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail("Backup failed to start: %s" % detail)
    except utils_backup.BackupTLSError as detail:
        if tls_error:
            logging.debug("Failed to get backup data as expected.")
        else:
            test.fail("Failed to get tls backup data: %s" % detail)
    except utils_backup.BackupCanceledError as detail:
        if expect_backup_canceled:
            logging.debug("Backup canceled as expected.")
            if not vm.is_alive():
                logging.debug("Check if vm can be started again when backup "
                              "canceled.")
                vm.start()
                vm.wait_for_login().close()
        else:
            test.fail("Backup job canceled: %s" % detail)
    finally:
        # Remove checkpoints; checkpoint metadata can only be cleaned
        # when the domain is down (or when qemu was force-killed).
        clean_checkpoint_metadata = not vm.is_alive()
        if "error_operation" in locals() and error_operation is not None:
            if "kill_qemu" in error_operation:
                clean_checkpoint_metadata = True
        utils_backup.clean_checkpoints(
            vm_name, clean_metadata=clean_checkpoint_metadata)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Restoring vm
        vmxml_backup.sync()
        # Remove iscsi devices
        if original_disk_type == "iscsi" or scratch_type == "block":
            libvirt.setup_or_cleanup_iscsi(False)
        # Remove gluster devices
        if original_disk_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        # Recover qemu.conf
        if "qemu_config" in locals():
            qemu_config.restore()
        # Remove tls object
        if "tls_obj" in locals():
            del tls_obj
        # Remove libvirt secret
        if "luks_secret_uuid" in locals():
            virsh.secret_undefine(luks_secret_uuid, ignore_status=True)
def run(test, params, env):
    """
    Test virt-admin srv-clients-info

    1) Change the clients related parameters in daemon config file;
    2) Restart daemon;
    3) Start several virsh connections;
    4) Check whether the parameters value listed by srv-clients-info
       are the same with the above settings.

    :param test: avocado test object, used for fail reporting
    :param params: test parameters dictionary from the cfg file
    :param env: test environment object (unused here beyond the signature)
    """
    max_clients = params.get("max_clients")
    max_anonymous_clients = params.get("max_anonymous_clients")
    server_name = params.get("server_name")
    num_clients = params.get("num_clients")
    # Publish connection details back into params for TLSConnection.
    # BUGFIX: the remote user must go into params["client_user"]; the
    # original code wrote it to params["server_user"], clobbering the
    # local user set just above (harmless only when both were "root").
    server_ip = params["server_ip"] = params.get("local_ip")
    params["server_user"] = params.get("local_user", "root")
    params["server_pwd"] = params.get("local_pwd")
    client_ip = params["client_ip"] = params.get("remote_ip")
    client_pwd = params["client_pwd"] = params.get("remote_pwd")
    client_user = params["client_user"] = params.get("remote_user", "root")
    tls_port = params.get("tls_port", "16514")
    tls_uri = "qemu+tls://%s:%s/system" % (server_ip, tls_port)
    tls_obj = None
    remote_virsh_dargs = {'remote_ip': client_ip,
                          'remote_user': client_user,
                          'remote_pwd': client_pwd,
                          'uri': tls_uri,
                          'ssh_remote_auth': True}
    if not server_name:
        server_name = virt_admin.check_server_name()
    config = virt_admin.managed_daemon_config()
    daemon = utils_libvirtd.Libvirtd("virtproxyd")
    try:
        # Apply the client limits under test and restart the daemon so
        # they take effect.
        config.max_clients = max_clients
        config.max_anonymous_clients = max_anonymous_clients
        daemon.restart()
        tls_obj = TLSConnection(params)
        tls_obj.conn_setup()
        tls_obj.auto_recover = True
        utils_iptables.Firewall_cmd().add_port(tls_port, 'tcp',
                                               permanent=True)
        clients_instant = []
        for _ in range(int(num_clients)):
            # Under split daemon mode, we can connect to virtproxyd via
            # remote tls/tcp connections,can not connect to virtproxyd
            # direct on local host
            clients_instant.append(
                virsh.VirshPersistent(**remote_virsh_dargs))
        result = virt_admin.srv_clients_info(server_name,
                                             ignore_status=True,
                                             debug=True)
        # Parse "key : value" lines into a dict for comparison.
        output = result.stdout_text.strip().splitlines()
        out_split = [item.split(':') for item in output]
        out_dict = dict([[item[0].strip(), item[1].strip()]
                         for item in out_split])
        if result.exit_status:
            test.fail("This operation should success "
                      "but failed. Output:\n %s" % result)
        else:
            if not (out_dict["nclients_max"] == max_clients and
                    out_dict["nclients_unauth_max"] == max_anonymous_clients):
                test.fail("attributes info listed by "
                          "srv-clients-info is not correct.")
            if out_dict["nclients"] != num_clients:
                test.fail("the number of clients connect to daemon "
                          "is not correct.")
    finally:
        config.restore()
        daemon.restart()
        utils_iptables.Firewall_cmd().remove_port(tls_port, 'tcp',
                                                  permanent=True)