def test_cmd_split(self):
    """Check shlex.split() and process.cmd_split() on empty and unicode input."""
    plain_str = ''
    unicode_str = u''
    empty_bytes = b''
    # shlex.split() can work with "plain_str" and "unicode_str" on both
    # Python 2 and Python 3.  While we're not testing Python itself,
    # this will help us catch possible differences in the Python
    # standard library should they arise.
    self.assertEqual(shlex.split(plain_str), [])
    self.assertEqual(shlex.split(astring.to_text(plain_str)), [])
    self.assertEqual(shlex.split(unicode_str), [])
    self.assertEqual(shlex.split(astring.to_text(unicode_str)), [])
    # on Python 3, shlex.split() won't work with bytes, raising:
    # AttributeError: 'bytes' object has no attribute 'read'.
    # To turn bytes into text (when necessary), that is, on
    # Python 3 only, use astring.to_text()
    self.assertEqual(shlex.split(astring.to_text(empty_bytes)), [])
    # Now let's test our specific implementation to split commands
    self.assertEqual(process.cmd_split(plain_str), [])
    self.assertEqual(process.cmd_split(unicode_str), [])
    self.assertEqual(process.cmd_split(empty_bytes), [])
    # Non-ASCII command names must survive the split unchanged
    unicode_command = u"avok\xe1do_test_runner arguments"
    self.assertEqual(process.cmd_split(unicode_command),
                     [u"avok\xe1do_test_runner", u"arguments"])
def read_nonblocking(self, internal_timeout=None, timeout=None):
    """
    Read from socket/fd until there is nothing to read for timeout seconds.

    :param internal_timeout: Time (seconds) to wait before we give up
                             reading from the child process, or None to
                             use the default value.
    :param timeout: Timeout for reading child process output.
    :return: the text read so far (possibly empty)
    """
    if internal_timeout is None:
        internal_timeout = 0.1
    end_time = None
    if timeout:
        end_time = time.time() + timeout
    data = ""
    while True:
        try:
            if self.console_type in ['tcp', 'unix']:
                # Non-blocking read; decode errors ignored so partial
                # multi-byte sequences don't raise.
                new_data = self.socket.recv(
                    1024, socket.MSG_DONTWAIT)
                data += astring.to_text(new_data, errors='ignore')
                return data
            elif self.console_type == 'udp':
                # Remember the peer so later writes can answer it
                new_data, self.peer_addr = self.socket.recvfrom(
                    1024, socket.MSG_DONTWAIT)
                data += astring.to_text(new_data, errors='ignore')
                return data
            elif self.console_type in ['file', 'pipe', 'tls']:
                try:
                    read_fds, _, _ = select.select(
                        [self.read_fd], [], [], internal_timeout)
                except Exception:
                    # best-effort: any select() failure is treated as
                    # "no more data to read"
                    return data
                if self.read_fd in read_fds:
                    if self.console_type == 'pipe':
                        new_data = os.read(self.read_fd, 1024)
                    else:
                        new_data = self.read_fd.read(1024)
                    if not new_data:
                        return data
                    # NOTE(review): os.read() returns bytes on Python 3
                    # while ``data`` starts as text — this concatenation
                    # assumes read_fd yields text; confirm fd/file modes.
                    data += new_data
                else:
                    return data
        except socket.error as detail:
            err = detail.args[0]
            # EAGAIN/EWOULDBLOCK just mean "nothing available now"
            if err in [errno.EAGAIN, errno.EWOULDBLOCK]:
                return data
            else:
                raise detail
        except OSError as detail:
            err = detail.args[0]
            if err in [errno.EAGAIN, errno.EWOULDBLOCK]:
                return data
            else:
                raise detail
        # Overall deadline check (only reachable on file/pipe/tls paths
        # that appended data without returning)
        if end_time and time.time() > end_time:
            return data
def test_to_text(self):
    """to_text() must decode bytes and pass unicode through unchanged."""
    decoded = astring.to_text(b'\xc3\xa1', 'utf-8')
    passthrough = astring.to_text(u'\u00e1', 'utf-8')
    self.assertTrue(astring.is_text(decoded))
    self.assertEqual(decoded, passthrough)
    self.assertEqual(astring.to_text(Exception(u'\u00e1')), u"\xe1")
    # For tuple, dict and others astring.to_text is equivalent of str()
    # because on py3 it's unicode and on py2 it uses __repr__ (is encoded)
    self.assertEqual(astring.to_text({u'\xe1': 1}), str({u'\xe1': 1}))
def _render(self, result):
    """Serialize a job ``result`` into the JSON report string.

    :param result: job result object holding per-test dicts and totals
    :return: pretty-printed JSON document
    """
    tests = []
    for test in result.tests:
        # Missing fields fall back to UNKNOWN / -1 sentinels
        tests.append({'id': str(test.get('name', UNKNOWN)),
                      'start': test.get('time_start', -1),
                      'end': test.get('time_end', -1),
                      'time': test.get('time_elapsed', -1),
                      'status': test.get('status', {}),
                      'whiteboard': test.get('whiteboard', UNKNOWN),
                      'logdir': test.get('logdir', UNKNOWN),
                      'logfile': test.get('logfile', UNKNOWN),
                      'fail_reason': astring.to_text(
                          test.get('fail_reason', UNKNOWN))})
    content = {'job_id': result.job_unique_id,
               'debuglog': result.logfile,
               'tests': tests,
               'total': result.tests_total,
               'pass': result.passed,
               'errors': result.errors,
               'failures': result.failed,
               'skip': result.skipped,
               'cancel': result.cancelled,
               'time': result.tests_total_time}
    return json.dumps(content,
                      sort_keys=True,
                      indent=4,
                      separators=(',', ': '))
def mapping_to_tree_loader(loader, node, looks_like_node=False):
    """Maps yaml mapping tag to TreeNode structure

    :param loader: the active yaml loader
    :param node: yaml mapping node being constructed
    :param looks_like_node: force treating the mapping as a tree node
    :return: OrderedDict for plain mappings, ListOfNodeObjects otherwise
    """
    _value = []
    for key_node, value_node in node.value:
        # Allow only strings as dict keys
        if key_node.tag.startswith('!'):    # reflect tags everywhere
            key = loader.construct_object(key_node)
        else:
            key = loader.construct_python_str(key_node)
        # If we are to keep them, use following, but we lose the control
        # for both, nodes and dicts
        # key = loader.construct_object(key_node)
        if isinstance(key, mux.Control):
            looks_like_node = True
        value = loader.construct_object(value_node)
        if isinstance(value, ListOfNodeObjects):
            looks_like_node = True
        _value.append((key, value))

    # No node markers seen: this is an ordinary mapping
    if not looks_like_node:
        return collections.OrderedDict(_value)

    # Otherwise re-pack the pairs into node objects / value tuples
    objects = ListOfNodeObjects()
    looks_like_node = False
    for name, values in _value:
        if isinstance(values, ListOfNodeObjects):   # New node from list
            objects.append(tree_node_from_values(name, values))
        elif values is None:            # Empty node
            objects.append(cls_node(astring.to_text(name)))
        else:                           # Values
            objects.append((name, values))
    return objects
def tree_node_from_values(name, values):
    """Create `name` node and add values

    :param name: name of the node to create
    :param values: parsed yaml values (dict, list of tuples/nodes, or None)
    :return: the populated node (possibly re-rooted by ``_apply_using``)
    """

    def node_content_from_node(node, values, using):
        """Processes node values into the current node content"""
        for value in values:
            if isinstance(value, cls_node):
                node.add_child(value)
            elif isinstance(value[0], mux.Control):
                if value[0].code == YAML_USING:
                    using = _handle_control_tag_using(path, name, using,
                                                      value[1])
                else:
                    _handle_control_tag(path, cls_node, node, value)
            elif isinstance(value[1], collections.OrderedDict):
                # Nested mapping becomes a child node (recursive)
                child = tree_node_from_values(astring.to_text(value[0]),
                                              value[1])
                node.add_child(child)
            else:
                node.value[value[0]] = value[1]
        return using

    def node_content_from_dict(node, values, using):
        """Processes dict values into the current node content"""
        for key, value in iteritems(values):
            if isinstance(key, mux.Control):
                if key.code == YAML_USING:
                    using = _handle_control_tag_using(path, name, using,
                                                      value)
                else:
                    _handle_control_tag(path, cls_node, node, [key, value])
            elif (isinstance(value, collections.OrderedDict) or
                  value is None):
                node.add_child(tree_node_from_values(key, value))
            else:
                node.value[key] = value
        return using

    # Initialize the node
    node = cls_node(astring.to_text(name))
    if not values:
        return node
    using = ''

    # Fill the node content from parsed values
    if isinstance(values, dict):
        using = node_content_from_dict(node, values, using)
    else:
        using = node_content_from_node(node, values, using)

    # Prefix nodes if tag "!using" was used
    if using:
        node = _apply_using(name, cls_node, using, node)
    return node
def node_content_from_node(node, values, using):
    """Processes node values into the current node content

    :param node: node being populated
    :param values: list of child nodes, control tuples or key/value pairs
    :param using: accumulated "!using" prefix, returned possibly updated
    """
    for value in values:
        if isinstance(value, cls_node):
            node.add_child(value)
        elif isinstance(value[0], mux.Control):
            if value[0].code == YAML_USING:
                using = _handle_control_tag_using(path, name, using,
                                                  value[1])
            else:
                _handle_control_tag(path, cls_node, node, value)
        elif isinstance(value[1], collections.OrderedDict):
            # Nested mapping becomes a child node (recursive)
            child = tree_node_from_values(astring.to_text(value[0]),
                                          value[1])
            node.add_child(child)
        else:
            node.value[value[0]] = value[1]
    return using
def _tree_node_from_values(path, name, values, using): """Create `name` node and add values""" # Initialize the node node = mux.MuxTreeNode(astring.to_text(name)) if not values: return node using = "" # Fill the node content from parsed values if isinstance(values, dict): using = _node_content_from_dict(path, node, values, using) else: using = _node_content_from_node(path, node, values, using) # Prefix nodes if tag "!using" was used if using: node = _apply_using(name, using, node) return node
def to_str(self, summary=0, variants=0, **kwargs):  # pylint: disable=W0613
    """Return a human-readable listing of every variant and its environment.

    :param summary: unused, kept for interface compatibility
    :param variants: unused, kept for interface compatibility
    :return: multi-line string, empty when there are no variants
    """
    if not self.variants:
        return ""
    out = []
    for variant in self.variants:
        node_paths = [node.path for node in variant["variant"]]
        out.append('\nVariant %s: %s' % (variant["variant_id"],
                                         ', '.join(node_paths)))
        env = set()
        for node in variant["variant"]:
            for key, value in node.environment.items():
                origin = node.environment.origin[key].path
                env.add(("%s:%s" % (origin, key),
                         astring.to_text(value)))
        if not env:
            continue
        # Align the "=>" column on the longest origin:key entry
        widest = max(len(entry[0]) for entry in env)
        fmt = ' %%-%ds => %%s' % widest
        for record in sorted(env):
            out.append(fmt % record)
    return "\n".join(out)
def node_content_from_node(node, values, using):
    """Processes node values into the current node content

    :param node: node being populated
    :param values: list of child nodes, control tuples or key/value pairs
    :param using: accumulated "!using" prefix, returned possibly updated
    """
    for value in values:
        if isinstance(value, cls_node):
            node.add_child(value)
        elif isinstance(value[0], mux.Control):
            if value[0].code == YAML_USING:
                using = _handle_control_tag_using(
                    path, name, using, value[1])
            else:
                _handle_control_tag(path, cls_node, node, value)
        elif isinstance(value[1], collections.OrderedDict):
            # Nested mapping becomes a child node (recursive)
            child = tree_node_from_values(astring.to_text(value[0]),
                                          value[1])
            node.add_child(child)
        else:
            node.value[value[0]] = value[1]
    return using
def version_compare(major, minor, update, session=None):
    """
    Determine/use the current libvirt library version on the system
    and compare input major, minor, and update values against it.
    If the running version is greater than or equal to the input
    params version, then return True; otherwise, return False

    This is designed to handle upstream version comparisons for
    test adjustments and/or comparisons as a result of upstream
    fixes or changes that could impact test results.

    :param major: Major version to compare against
    :param minor: Minor version to compare against
    :param update: Update value to compare against
    :param session: Shell session on remote host
    :return: True if running version is greater than or equal to the
             input libvirt version
    """
    LIBVIRT_LIB_VERSION = 0
    # Run "virsh version" locally, or via the remote session when given
    func = process.system_output
    if session:
        func = session.cmd_output
    try:
        regex = r'[Uu]sing\s*[Ll]ibrary:\s*[Ll]ibvirt\s*'
        regex += r'(\d+)\.(\d+)\.(\d+)'
        lines = to_text(func("virsh version")).splitlines()
        for line in lines:
            mobj = re.search(regex, line)
            if bool(mobj):
                # Encode x.y.z as x*1000000 + y*1000 + z for comparison
                LIBVIRT_LIB_VERSION = int(mobj.group(1)) * 1000000 + \
                    int(mobj.group(2)) * 1000 + \
                    int(mobj.group(3))
                break
    except (ValueError, TypeError, AttributeError):
        # Unparseable output => treat as "version requirement not met"
        logging.warning("Error determining libvirt version")
        return False
    compare_version = major * 1000000 + minor * 1000 + update
    if LIBVIRT_LIB_VERSION >= compare_version:
        return True
    return False
def to_str(self, summary=0, variants=0, **kwargs):  # pylint: disable=W0613
    """Return a human-readable listing of every variant and its environment.

    :param summary: unused, kept for interface compatibility
    :param variants: unused, kept for interface compatibility
    :return: multi-line string, empty when there are no variants
    """
    if not self.variants:
        return ""
    out = []
    for variant in self.variants:
        paths = ", ".join([x.path for x in variant["variant"]])
        out.append(f"\nVariant {variant['variant_id']}: {paths}")
        env = set()
        for node in variant["variant"]:
            for key, value in node.environment.items():
                origin = node.environment.origin[key].path
                env.add((f"{origin}:{key}", astring.to_text(value)))
        if not env:
            continue
        # Align the "=>" column on the longest origin:key entry
        fmt = " %%-%ds => %%s" % max(  # pylint: disable=C0209
            [len(_[0]) for _ in env]
        )
        for record in sorted(env):
            out.append(fmt % record)
    return "\n".join(out)
def _render(result):
    """Serialize a job ``result`` into the JSON report string.

    :param result: job result object holding per-test dicts and counters
    :return: pretty-printed JSON document
    """
    tests = []
    for test in result.tests:
        fail_reason = test.get("fail_reason", UNKNOWN)
        if fail_reason is not None:
            fail_reason = astring.to_text(fail_reason)
        # Actually we are saving the TestID() there.
        test_id = test.get("name", UNKNOWN)
        if isinstance(test_id, TestID):
            name = test_id.name
        else:
            # Fix: a plain string (or the UNKNOWN sentinel) previously
            # left ``name`` unbound, raising UnboundLocalError below.
            name = test_id
        tests.append({
            "id": str(test_id),
            "name": str(name),
            "start": test.get("time_start", -1),
            "end": test.get("time_end", -1),
            "time": test.get("time_elapsed", -1),
            "status": test.get("status", {}),
            "tags": test.get("tags") or {},
            "whiteboard": test.get("whiteboard", UNKNOWN),
            "logdir": test.get("logdir", UNKNOWN),
            "logfile": test.get("logfile", UNKNOWN),
            "fail_reason": fail_reason,
        })
    content = {
        "job_id": result.job_unique_id,
        "debuglog": result.logfile,
        "tests": tests,
        "total": result.tests_total,
        "pass": result.passed,
        "errors": result.errors,
        "failures": result.failed,
        "skip": result.skipped,
        "cancel": result.cancelled,
        "warn": result.warned,
        "interrupt": result.interrupted,
        "time": result.tests_total_time,
    }
    return json.dumps(content, sort_keys=True, indent=4,
                      separators=(",", ": "))
def variant_to_str(variant, verbosity, out_args=None, debug=False):
    """
    Reports human readable representation of a variant

    :param variant: Valid variant (list of TreeNode-like objects)
    :param verbosity: Output verbosity where 0 means brief
    :param out_args: Extra output arguments (currently unused)
    :param debug: Whether the variant contains and should report debug info
    :return: Human readable representation
    """
    del out_args
    out = []
    if not debug:
        paths = ", ".join([x.path for x in variant["variant"]])
    else:
        # Debug mode annotates each node with its source yaml file
        color = output.TERM_SUPPORT.LOWLIGHT
        cend = output.TERM_SUPPORT.ENDC
        paths = ", ".join(
            [
                f"{_.name}{color}@{getattr(_, 'yaml', 'Unknown')}{cend}"
                for _ in variant["variant"]
            ]
        )
    out.append(
        "%sVariant %s: %s"
        % ("\n" if verbosity else "", variant["variant_id"], paths)
    )
    if verbosity:
        env = set()
        for node in variant["variant"]:
            for key, value in node.environment.items():
                origin = node.environment.origin[key].path
                env.add((f"{origin}:{key}", astring.to_text(value)))
        if not env:
            return out
        # Align the "=>" column on the longest origin:key entry
        fmt = " %%-%ds => %%s" % max(  # pylint: disable=C0209
            [len(_[0]) for _ in env]
        )
        for record in sorted(env):
            out.append(fmt % record)
    return out
def get_ovf_content(output):
    """
    Find and read ovf file.

    :param output: command output containing the UUIDs to look up
    :return: contents of the matching OVF file, or "" when none is found
    """
    export_domain_uuid, _, vol_uuid = get_all_uuids(output)
    export_vm_dir = os.path.join(mnt_point, export_domain_uuid,
                                 'master/vms')
    ovf_content = ""
    if os.path.isdir(export_vm_dir):
        # Locate the OVF file referencing this volume uuid
        ovf_id = "ovf:id='%s'" % vol_uuid
        ret = to_text(process.system_output(
            "grep -R \"%s\" %s" % (ovf_id, export_vm_dir)))
        ovf_file = ret.split(":")[0]
        if os.path.isfile(ovf_file):
            # Fix: context manager guarantees the file is closed even if
            # read() raises (was a manual open/read/close sequence)
            with open(ovf_file, "r") as ovf_f:
                ovf_content = ovf_f.read()
    else:
        logging.error("Can't find ovf file to read")
    return ovf_content
def cpus_info(vm, env="guest"): """ To get host cores, threads, sockets in the system :param vm: VM object :param env: guest or host :return: cpu sockets, cores, threads info as list """ if "guest" in env: session = vm.wait_for_login() output = session.cmd_output("lscpu") else: output = astring.to_text(process.system_output("lscpu", shell=True)) no_cpus = int(re.findall('CPU\(s\):\s*(\d+)', str(output))[0]) no_threads = int(re.findall('Thread\(s\)\sper\score:\s*(\d+)', str(output))[0]) no_cores = int(re.findall('Core\(s\)\sper\ssocket:\s*(\d+)', str(output))[0]) no_sockets = int(re.findall('Socket\(s\):\s*(\d+)', str(output))[0]) cpu_info = [no_cpus, no_threads, no_cores, no_sockets] if "guest" in env: session.close() return cpu_info
def create_from_yaml(paths, debug=False):
    """
    Create tree structure from yaml-like file

    :param paths: list of yaml file paths to be merged into one tree
    :param debug: when True, annotate nodes with their source file
    :raise SyntaxError: When yaml-file is corrupted
    :return: Root of the created tree structure
    """
    def _merge(data, path):
        """Normal run"""
        tmp = _create_from_yaml(path)
        if tmp:
            data.merge(tmp)

    def _merge_debug(data, path):
        """Use NamedTreeNodeDebug magic"""
        node_cls = get_named_tree_cls(path, mux.MuxTreeNodeDebug)
        tmp = _create_from_yaml(path, node_cls)
        if tmp:
            data.merge(tmp)

    if not debug:
        data = mux.MuxTreeNode()
        merge = _merge
    else:
        data = mux.MuxTreeNodeDebug()
        merge = _merge_debug

    path = None
    try:
        for path in paths:
            merge(data, path)
    # Yaml can raise IndexError on some files
    except (yaml.YAMLError, IndexError) as details:
        # Give a friendlier hint for the most common user mistake
        if (u'mapping values are not allowed in this context' in
                astring.to_text(details)):
            details = (u"%s\nMake sure !tags and colons are separated by a "
                       u"space (eg. !include :)" % details)
        msg = u"Invalid multiplex file '%s': %s" % (path, details)
        raise IOError(2, msg, path)
    return data
def create_from_yaml(paths, debug=False):
    """
    Create tree structure from yaml-like file

    :param paths: list of yaml file paths to be merged into one tree
    :param debug: when True, annotate nodes with their source file
    :raise SyntaxError: When yaml-file is corrupted
    :return: Root of the created tree structure
    """
    def _merge(data, path):
        """Normal run"""
        tmp = _create_from_yaml(path)
        if tmp:
            data.merge(tmp)

    def _merge_debug(data, path):
        """Use NamedTreeNodeDebug magic"""
        node_cls = get_named_tree_cls(path, mux.MuxTreeNodeDebug)
        tmp = _create_from_yaml(path, node_cls)
        if tmp:
            data.merge(tmp)

    if not debug:
        data = mux.MuxTreeNode()
        merge = _merge
    else:
        data = mux.MuxTreeNodeDebug()
        merge = _merge_debug

    path = None
    try:
        for path in paths:
            merge(data, path)
    # Yaml can raise IndexError on some files
    except (yaml.YAMLError, IndexError) as details:
        # Give a friendlier hint for the most common user mistake
        if (u'mapping values are not allowed in this context' in
                astring.to_text(details)):
            details = (u"%s\nMake sure !tags and colons are separated by a "
                       u"space (eg. !include :)" % details)
        msg = u"Invalid multiplex file '%s': %s" % (path, details)
        raise IOError(2, msg, path)
    return data
def translate_path(self, path):
    """
    Translate a /-separated PATH to the local filename syntax.

    Components that mean special things to the local file system
    (e.g. drive or directory names) are ignored.  (XXX They should
    probably be diagnosed.)
    """
    # abandon query parameters
    path = urlparse(to_text(path))[2]
    path = posixpath.normpath(unquote(path))
    components = [part for part in path.split('/') if part]
    result = self.server.cwd
    for component in components:
        _, component = os.path.splitdrive(component)
        _, component = os.path.split(component)
        # Skip '.' and '..' so the path cannot escape the served root
        if component in (os.curdir, os.pardir):
            continue
        result = os.path.join(result, component)
    return result
def get_content_by_encoding(url): """ Returns the content of the given URL, attempting to use server provided encoding. :param url: the url to be fetched :rtype: str :raises: URLError when the given url can not be retrieved """ http_response = download.url_open(url) content_type = None encoding = None if hasattr(http_response, 'headers'): content_type = http_response.headers['Content-Type'] elif hasattr(http_response, 'getheader'): content_type = http_response.getheader('Content-Type') if content_type is not None: match = re.match(r'^[az\\].*\; charset\=(.*)$', content_type) if match is not None: encoding = match.group(1) content = http_response.read() return astring.to_text(content, encoding)
def get_content_by_encoding(url): """ Returns the content of the given URL, attempting to use server provided encoding. :param url: the url to be fetched :rtype: str :raises: URLError when the given url can not be retrieved """ http_response = download.url_open(url) content_type = None encoding = None if hasattr(http_response, 'headers'): content_type = http_response.headers['Content-Type'] elif hasattr(http_response, 'getheader'): content_type = http_response.getheader('Content-Type') if content_type is not None: match = re.match(r'^[az\\].*\; charset\=(.*)$', content_type) if match is not None: encoding = match.group(1) content = http_response.read() return astring.to_text(content, encoding)
def dump_variant(variant):
    """Dump a variant into a json-serializable representation

    :param variant: Valid variant (list of TreeNode-like objects)
    :return: json-serializable representation
    """
    def dump_tree_node(node):
        """
        Turns TreeNode-like object into tuple(path, env_representation)
        """
        return (astring.to_text(node.path),
                [(astring.to_text(node.environment.origin[key].path),
                  astring.to_text(key), value)
                 for key, value in node.environment.items()])

    safe_variant = {}
    safe_variant["paths"] = [astring.to_text(pth)
                             for pth in variant.get("paths")]
    safe_variant["variant_id"] = variant.get("variant_id")
    safe_variant["variant"] = [dump_tree_node(_)
                               for _ in variant.get("variant", [])]
    return safe_variant
def _render(result):
    """Serialize a job ``result`` into the JSON report string.

    :param result: job result object holding per-test dicts and counters
    :return: pretty-printed JSON document
    """
    tests = []
    for test in result.tests:
        fail_reason = test.get('fail_reason', UNKNOWN)
        if fail_reason is not None:
            fail_reason = astring.to_text(fail_reason)
        tags = test.get('tags') or {}
        tests.append({
            'id': str(test.get('name', UNKNOWN)),
            'start': test.get('time_start', -1),
            'end': test.get('time_end', -1),
            'time': test.get('time_elapsed', -1),
            'status': test.get('status', {}),
            # Tag values are sets; turn them into lists for json
            'tags': {k: list(v or {}) for k, v in tags.items()},
            'whiteboard': test.get('whiteboard', UNKNOWN),
            'logdir': test.get('logdir', UNKNOWN),
            'logfile': test.get('logfile', UNKNOWN),
            'fail_reason': fail_reason
        })
    content = {
        'job_id': result.job_unique_id,
        'debuglog': result.logfile,
        'tests': tests,
        'total': result.tests_total,
        'pass': result.passed,
        'errors': result.errors,
        'failures': result.failed,
        'skip': result.skipped,
        'cancel': result.cancelled,
        'warn': result.warned,
        'interrupt': result.interrupted,
        'time': result.tests_total_time
    }
    return json.dumps(content,
                      sort_keys=True,
                      indent=4,
                      separators=(',', ': '))
def get_scratch_pkg_urls(self, pkg, arch=None):
    """
    Gets the urls for the scratch packages specified in pkg

    :type pkg: KojiScratchPkgSpec
    :param pkg: a scratch package specification
    :type arch: string
    :param arch: packages built for this architecture, but also including
                 architecture independent (noarch) packages
    :return: list of rpm URLs found in the scratch task directory index
    """
    rpm_urls = []
    if arch is None:
        # Default to the running machine's architecture
        arch = os.uname()[4]
    arches = [arch, 'noarch']
    index_url = "%s/%s/task_%s" % (self.get_scratch_base_url(), pkg.user,
                                   pkg.task)
    index_parser = KojiDirIndexParser()
    index_parser.feed(astring.to_text(
        urllib.request.urlopen(index_url).read()))
    if pkg.subpackages:
        # Only pick files whose NVR name matches a requested subpackage
        for p in pkg.subpackages:
            for pfn in index_parser.package_file_names:
                r = RPMFileNameInfo(pfn)
                info = r.get_nvr_info()
                if (p == info['name'] and r.get_arch() in arches):
                    rpm_urls.append("%s/%s" % (index_url, pfn))
    else:
        # No subpackage filter: take every file matching the arch list
        for pfn in index_parser.package_file_names:
            if (RPMFileNameInfo(pfn).get_arch() in arches):
                rpm_urls.append("%s/%s" % (index_url, pfn))
    return rpm_urls
def run(test, params, env):
    """
    Test start domain with nwfilter rules.

    1) Prepare parameters.
    2) Prepare nwfilter rule and update domain interface to apply.
    3) Start domain and check rule.
    4) Clean env
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    exist_filter = params.get("exist_filter", "no-mac-spoofing")
    check_cmd = params.get("check_cmd")
    expect_match = params.get("expect_match")
    status_error = "yes" == params.get("status_error", "no")
    mount_noexec_tmp = "yes" == params.get("mount_noexec_tmp", "no")
    kill_libvirtd = "yes" == params.get("kill_libvirtd", "no")
    bug_url = params.get("bug_url", "")
    ipset_command = params.get("ipset_command")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    username = params.get("username")
    password = params.get("password")

    # Prepare vm filterref parameters dict list: parameter_name_N /
    # parameter_value_N params pairs are collected in index order
    filter_param_list = []
    params_key = []
    for i in params.keys():
        if 'parameter_name_' in i:
            params_key.append(i)
    params_key.sort()
    for i in range(len(params_key)):
        params_dict = {}
        params_dict['name'] = params[params_key[i]]
        params_dict['value'] = params['parameter_value_%s' % i]
        filter_param_list.append(params_dict)
    filterref_dict = {}
    filterref_dict['name'] = filter_name
    filterref_dict['parameters'] = filter_param_list

    # backup vm xml
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    libvirtd = utils_libvirtd.Libvirtd("virtqemud")
    device_name = None

    def clean_up_dirty_nwfilter_binding():
        # Remove leftover nwfilter bindings from earlier runs so they
        # cannot interfere with this test's rule checks
        cmd_result = virsh.nwfilter_binding_list(debug=True)
        binding_list = cmd_result.stdout_text.strip().splitlines()
        binding_list = binding_list[2:]  # skip the two header lines
        result = []
        # If binding list is not empty.
        if binding_list:
            for line in binding_list:
                # Split on whitespace, assume 1 column
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        logging.info("nwfilter binding list is: %s", result)
        for binding_uuid in result:
            try:
                virsh.nwfilter_binding_delete(binding_uuid)
            except Exception as e:
                logging.error(
                    "Exception thrown while undefining nwfilter-binding: %s",
                    str(e))
                raise

    try:
        # Clean up dirty nwfilter binding if there are.
        clean_up_dirty_nwfilter_binding()
        rule = params.get("rule")
        if rule:
            # Create new filter xml
            filterxml = utlv.create_nwfilter_xml(params)
            # Define filter xml
            virsh.nwfilter_define(filterxml.xml, debug=True)

        # Update first vm interface with filter
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        vmxml.del_device(iface_xml)
        new_iface = interface.Interface('network')
        new_iface.xml = iface_xml.xml
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        logging.debug("new interface xml is: %s" % new_iface)
        vmxml.add_device(new_iface)
        vmxml.sync()

        if mount_noexec_tmp:
            # Back /tmp with a noexec,nosuid iscsi mount for this scenario
            device_name = utlv.setup_or_cleanup_iscsi(is_setup=True)
            utlv.mkfs(device_name, 'ext4')
            cmd = "mount %s /tmp -o noexec,nosuid" % device_name
            process.run(cmd, shell=True)

        if ipset_command:
            pkg = "ipset"
            if not utils_package.package_install(pkg):
                test.cancel("Can't install ipset on host")
            process.run(ipset_command, shell=True)

        # Run command
        try:
            vm.start()
            if not mount_noexec_tmp:
                vm.wait_for_serial_login(username=username,
                                         password=password)
            vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
            iface_xml = vmxml.get_devices('interface')[0]
            iface_target = iface_xml.target['dev']
            logging.debug("iface target dev name is %s", iface_target)

            # Check iptables or ebtables on host
            if check_cmd:
                if "DEVNAME" in check_cmd:
                    check_cmd = check_cmd.replace("DEVNAME", iface_target)
                ret = utils_misc.wait_for(lambda: not process.system(
                    check_cmd, ignore_status=True, shell=True), timeout=30)
                if not ret:
                    # NOTE(review): "Rum" looks like a typo for "Run" —
                    # confirm before changing the error message
                    test.fail("Rum command '%s' failed" % check_cmd)
                # This change anchors
                # nwfilter_vm_start.possitive_test.new_filter.variable_notation
                # case.  The matched destination could be ip address or
                # hostname.
                if ("iptables -L" in check_cmd and expect_match
                        and 'ACCEPT' in expect_match):
                    # ip address that need to be replaced
                    replace_param = params.get("parameter_value_2")
                    # Get hostname by ip address.
                    hostname_info = None
                    try:
                        hostname_info = socket.gethostbyaddr(replace_param)
                    except socket.error as e:
                        logging.info(
                            "Failed to get hostname from ip address with "
                            "error: %s", str(e))
                    if hostname_info:
                        # String is used to replace ip address
                        replace_with = "%s|%s" % (replace_param,
                                                  hostname_info[0])
                        expect_match = r"%s" % expect_match.replace(
                            replace_param, replace_with)
                        logging.debug("final iptables match string:%s",
                                      expect_match)
                out = astring.to_text(process.system_output(
                    check_cmd, ignore_status=False, shell=True))
                if expect_match and not re.search(expect_match, out):
                    test.fail("'%s' not found in output: %s"
                              % (expect_match, out))
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case.\n error:"
                          " %s\n%s" % (e, bug_url))

        if kill_libvirtd:
            daemon_name = libvirtd.service_name
            pid = process.run('pidof %s' % daemon_name,
                              shell=True).stdout_text.strip()
            cmd = "kill -s TERM %s" % pid
            process.run(cmd, shell=True)
            ret = utils_misc.wait_for(lambda: not libvirtd.is_running(),
                                      timeout=30)
            # After libvirt 5.6.0, libvirtd is using systemd socket
            # activation by default
            if not ret and not libvirt_version.version_compare(5, 6, 0):
                test.fail("Failed to kill libvirtd. %s" % bug_url)
    finally:
        if kill_libvirtd:
            libvirtd.restart()
        # Clean env
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
        # Undefine created filter
        if filter_name != exist_filter:
            virsh.nwfilter_undefine(filter_name, debug=True)
        if mount_noexec_tmp:
            if device_name:
                process.run("umount -l %s" % device_name,
                            ignore_status=True, shell=True)
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        if ipset_command:
            process.run("ipset destroy blacklist", shell=True)
def test_to_text_decode_is_text(self):
    """to_text() with an explicit encoding must always produce text."""
    for value in (b'', '', u''):
        self.assertTrue(astring.is_text(astring.to_text(value, 'ascii')))
def tests(self):
    """Prepare the per-test entries rendered by the HTML report.

    :return: list of dicts with formatted fields for each test result
    """
    # Test status -> Bootstrap contextual row class.  Fix: the "PASS"
    # entry was the garbled value "******"; "success" is the Bootstrap
    # class consistent with the other warning/danger/info mappings.
    mapping = {"SKIP": "warning",
               "ABORT": "danger",
               "ERROR": "danger",
               "FAIL": "danger",
               "WARN": "warning",
               "PASS": "success",
               "START": "info",
               "ALERT": "danger",
               "RUNNING": "info",
               "NOSTATUS": "info",
               "INTERRUPTED": "danger",
               "CANCEL": "warning"}
    test_info = []
    results_dir = self.results_dir(False)
    for tst in self.result.tests:
        formatted = {}
        formatted['uid'] = tst['name'].uid
        formatted['name'] = tst['name'].name
        if 'params' in tst:
            params = ''
            try:
                parameters = 'Params:\n'
                for path, key, value in tst['params']:
                    parameters += ' %s:%s => %s\n' % (path, key, value)
            except KeyError:
                pass
            else:
                params = parameters
        else:
            params = "No params"
        formatted['params'] = params
        formatted['variant'] = tst['name'].variant or ''
        formatted['status'] = tst['status']
        # Log locations are made relative to the report directory so the
        # generated links work when the results tree is moved
        logdir = os.path.join(results_dir, 'test-results', tst['logdir'])
        formatted['logdir'] = os.path.relpath(logdir, self.html_output_dir)
        logfile = os.path.join(logdir, 'debug.log')
        formatted['logfile'] = os.path.relpath(logfile,
                                               self.html_output_dir)
        formatted['logfile_basename'] = os.path.basename(logfile)
        formatted['time'] = "%.2f" % tst['time_elapsed']
        local_time_start = time.localtime(tst['time_start'])
        formatted['time_start'] = time.strftime("%Y-%m-%d %H:%M:%S",
                                                local_time_start)
        formatted['row_class'] = mapping[tst['status']]
        # Long failure reasons are collapsed into a popover link
        exhibition_limit = 40
        fail_reason = tst.get('fail_reason')
        if fail_reason is None:
            fail_reason = '<unknown>'
        fail_reason = astring.to_text(fail_reason)
        if len(fail_reason) > exhibition_limit:
            fail_reason = ('<a data-container="body" '
                           'data-toggle="popover" '
                           'data-placement="top" '
                           'title="Error Details" '
                           'data-content="%s">%s...</a>'
                           % (fail_reason,
                              fail_reason[:exhibition_limit]))
        formatted['fail_reason'] = fail_reason
        test_info.append(formatted)
    return test_info
def _feed_html_parser(self, url, parser):
    """Download ``url`` and feed the decoded body into ``parser``.

    :param url: URL of the HTML page to fetch
    :param parser: HTMLParser-like object receiving the decoded text
    :raises ImageProviderError: when the URL cannot be opened
    """
    try:
        # Fix: context manager closes the HTTP response (was leaked);
        # exception chaining preserved via "from".
        with urlopen(url) as response:
            data = response.read()
        parser.feed(astring.to_text(data, self.HTML_ENCODING))
    except HTTPError as exc:
        # NOTE(review): the message reports self.url_versions although
        # the failing URL is ``url`` — kept for compatibility; confirm.
        raise ImageProviderError(f"Cannot open {self.url_versions}") from exc
def test_to_text_is_text(self):
    """Default to_text() must always return text."""
    for value in (b'', '', u''):
        self.assertTrue(astring.is_text(astring.to_text(value)))
def test_to_text_is_text(self):
    """Default to_text() must always return text."""
    for value in (b'', '', u''):
        self.assertTrue(astring.is_text(astring.to_text(value)))
def run(test, params, env):
    """
    Test update filter rules when domain is running.

    1) Prepare parameters.
    2) Add filter to domain interface.
    3) Start domain.
    4) Update filter rule and check
    5) Cleanup
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    check_cmd = params.get("check_cmd")
    expect_match = params.get("expect_match")
    check_vm_cmd = params.get("check_vm_cmd")
    vm_expect_match = params.get("vm_expect_match")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    filterref_dict = {}
    filterref_dict['name'] = filter_name

    # backup vm xml
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    new_filter = libvirt_xml.NwfilterXML()
    filter_backup = new_filter.new_from_filter_dumpxml(filter_name)

    try:
        # Update first vm interface with filter
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        vmxml.del_device(iface_xml)
        new_iface = interface.Interface('network')
        new_iface.xml = iface_xml.xml
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        # Idiom fix: lazy logging args instead of eager %-formatting
        logging.debug("new interface xml is: %s", new_iface)
        vmxml.add_device(new_iface)
        vmxml.sync()

        # Start vm
        vm.start()
        session = vm.wait_for_login()
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = vmxml.get_devices('interface')[0]
        iface_target = iface_xml.target['dev']
        logging.debug("iface target dev name is %s", iface_target)

        # Update filter rule by nwfilter-define
        filterxml = utlv.create_nwfilter_xml(params)
        # Define filter xml
        virsh.nwfilter_define(filterxml.xml, debug=True)

        # Check ebtables on host after filter update
        if "DEVNAME" in check_cmd:
            check_cmd = check_cmd.replace("DEVNAME", iface_target)
        ret = utils_misc.wait_for(lambda: not process.system(
            check_cmd, ignore_status=True, shell=True), timeout=30)
        if not ret:
            # Fix: error message typo ("Rum command" -> "Run command")
            test.fail("Run command '%s' failed" % check_cmd)
        out = astring.to_text(process.system_output(
            check_cmd, ignore_status=False, shell=True))
        if expect_match and not re.search(expect_match, out):
            test.fail("'%s' not found in output: %s" % (expect_match, out))

        # Check in vm
        if check_vm_cmd:
            output = session.cmd_output(check_vm_cmd)
            logging.debug("cmd output: %s", output)
            if vm_expect_match and not re.search(vm_expect_match, output):
                test.fail("'%s' not found in output: %s"
                          % (vm_expect_match, output))
    finally:
        # Clean env
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
        # Restore created filter
        virsh.nwfilter_undefine(filter_name, debug=True)
        virsh.nwfilter_define(filter_backup.xml, debug=True)
def download_file(asset_info, interactive=False, force=False):
    """
    Verifies if file that can be find on url is on destination with right hash.

    This function will verify the SHA1 hash of the file. If the file
    appears to be missing or corrupted, let the user know.

    :param asset_info: Dictionary returned by get_asset_info
    :param interactive: If True, ask the user before (re-)downloading;
                        otherwise act automatically.
    :param force: Passed through to uncompress_asset() to force
                  uncompression even when not strictly needed.
    """
    file_ok = False
    problems_ignored = False
    had_to_download = False
    sha1 = None

    url = asset_info['url']
    sha1_url = asset_info['sha1_url']
    destination = asset_info['destination']
    title = asset_info['title']

    # Fetch the expected SHA1 from its companion URL, if one is known.
    if sha1_url is not None:
        try:
            LOG.info("Verifying expected SHA1 sum from %s", sha1_url)
            sha1_file = urllib.request.urlopen(sha1_url)
            sha1_contents = astring.to_text(sha1_file.read())
            # sha1sum-style output: "<hash>  <filename>"
            sha1 = sha1_contents.split(" ")[0]
            LOG.info("Expected SHA1 sum: %s", sha1)
        except Exception as e:
            LOG.error("Failed to get SHA1 from file: %s", e)
    else:
        sha1 = None

    destination_dir = os.path.dirname(destination)
    if not os.path.isdir(destination_dir):
        os.makedirs(destination_dir)

    if not os.path.isfile(destination):
        LOG.warning("File %s not found", destination)
        if interactive:
            answer = genio.ask("Would you like to download it from %s?" % url)
        else:
            answer = 'y'
        if answer == 'y':
            try:
                download.url_download_interactive(url, destination,
                                                  "Downloading %s" % title)
                had_to_download = True
            except Exception as download_failure:
                LOG.error("Check your internet connection: %s",
                          download_failure)
        else:
            LOG.warning("Missing file %s", destination)
    else:
        LOG.info("Found %s", destination)
        # No known-good hash means nothing to verify against.
        if sha1 is None:
            answer = 'n'
        else:
            answer = 'y'

        if answer == 'y':
            actual_sha1 = crypto.hash_file(destination, algorithm='sha1')
            if actual_sha1 != sha1:
                LOG.info("Actual SHA1 sum: %s", actual_sha1)
                if interactive:
                    answer = genio.ask("The file seems corrupted or outdated. "
                                       "Would you like to download it?")
                else:
                    LOG.info("The file seems corrupted or outdated")
                    answer = 'y'
                if answer == 'y':
                    LOG.info("Updating image to the latest available...")
                    while not file_ok:
                        try:
                            download.url_download_interactive(
                                url, destination, title)
                        except Exception as download_failure:
                            LOG.error("Check your internet connection: %s",
                                      download_failure)
                        sha1_post_download = crypto.hash_file(
                            destination, algorithm='sha1')
                        had_to_download = True
                        if sha1_post_download != sha1:
                            # BUG FIX: report the hash of the file just
                            # downloaded, not the stale pre-download hash.
                            LOG.error("Actual SHA1 sum: %s",
                                      sha1_post_download)
                            if interactive:
                                answer = genio.ask("The file downloaded %s is "
                                                   "corrupted. Would you like "
                                                   "to try again?" %
                                                   destination)
                            else:
                                answer = 'n'
                            if answer == 'n':
                                problems_ignored = True
                                LOG.error("File %s is corrupted", destination)
                                file_ok = True
                            else:
                                file_ok = False
                        else:
                            file_ok = True
            else:
                file_ok = True
                LOG.info("SHA1 sum check OK")
        else:
            problems_ignored = True
            LOG.info("File %s present, but did not verify integrity",
                     destination)

    if file_ok:
        if not problems_ignored:
            LOG.info("%s present, with proper checksum", destination)
    uncompress_asset(asset_info=asset_info, force=force or had_to_download)
def _escape_attr(self, attrib): attrib = ''.join(_ if _ in self.PRINTABLE else "\\x%02x" % ord(_) for _ in astring.to_text(attrib, encoding='utf-8')) return attrib
def run(test, params, env):
    """
    libvirt smt test:
    1) prepare the guest with given topology
    2) Start and login to the guest
    3) Check for ppc64_cpu --smt and smt should be on
    4) ppc64_cpu --smt=off and smt should be off
    5) ppc64_cpu --smt=on and smt should be on
    6) Check for core present using ppc64_cpu
    7) Check for online core using ppc64_cpu
    8) Check for lscpu for thread, core, socket info updated properly
    9) Change the number of cores and check in lscpu

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    # Accumulates the number of failed checks; the test only fails at the
    # end, so every check runs and all mismatches get logged.
    error_count = 0

    def smt_check(vm, cmd, output, extra=None, ignorestatus=False):
        """
        Run and check SMT command inside guest

        :param vm: VM object
        :param cmd: Given smt command
        :param output: Expected output
        :param extra: Extra output to be added
        :param ignorestatus: True or False to ignore status
        :return: error count
        """
        err_count = 0
        session = vm.wait_for_login()
        actual_output = session.cmd_output(cmd).strip()
        # Exit status of the previous command, read from the same shell
        return_output = session.cmd_output('echo $?').strip()
        if extra:
            expected_output = output + extra
        else:
            expected_output = output
        if expected_output != actual_output:
            logging.error("Command: %s failed\nActual output: %s\nExpected "
                          "output: %s", cmd, actual_output, expected_output)
            if int(return_output) == 0 and not ignorestatus:
                logging.error("Command: %s returned zero"
                              "\n Expecting a non zero number", cmd)
            err_count = 1
        else:
            if int(return_output) != 0 and not ignorestatus:
                logging.error("Command: %s returned non-zero"
                              "\n Expecting zero", cmd)
                err_count += 1
            else:
                logging.debug("Command: %s ran successfully", cmd)
        session.close()
        return err_count

    def cpus_info(vm, env="guest"):
        """
        To get host cores, threads, sockets in the system

        :param vm: VM object
        :param env: guest or host
        :return: cpu sockets, cores, threads info as list
            [no_cpus, no_threads, no_cores, no_sockets]
        """
        # NOTE(review): parameter name `env` shadows the outer test env dict;
        # here it is just a "guest"/"host" selector string.
        if "guest" in env:
            session = vm.wait_for_login()
            output = session.cmd_output("lscpu")
        else:
            output = astring.to_text(process.system_output("lscpu",
                                                           shell=True))
        no_cpus = int(re.findall('CPU\(s\):\s*(\d+)', str(output))[0])
        no_threads = int(re.findall('Thread\(s\)\sper\score:\s*(\d+)',
                                    str(output))[0])
        no_cores = int(re.findall('Core\(s\)\sper\ssocket:\s*(\d+)',
                                  str(output))[0])
        no_sockets = int(re.findall('Socket\(s\):\s*(\d+)', str(output))[0])
        cpu_info = [no_cpus, no_threads, no_cores, no_sockets]
        if "guest" in env:
            session.close()
        return cpu_info

    # Test parameters and their ppc64_cpu command defaults
    vm_name = params.get("main_vm")
    smt_chk_cmd = params.get("smt_chk_cmd", "ppc64_cpu --smt")
    # NOTE(review): smt_on_cmd is read but never used in this function
    smt_on_cmd = params.get("smt_on_cmd", "ppc64_cpu --smt=on")
    smt_off_cmd = params.get("smt_off_cmd", "ppc64_cpu --smt=off")
    smt_core_pst_cmd = params.get("smt_core_present_cmd",
                                  "ppc64_cpu --cores-present")
    smt_core_on_cmd = params.get("smt_core_on_cmd", "ppc64_cpu --cores-on")
    smt_chk_on_output = params.get("smt_chk_on_output", "SMT is on")
    smt_chk_off_output = params.get("smt_chk_off_output", "SMT is off")
    smt_core_pst_output = params.get("smt_core_pst_output",
                                     "Number of cores present =")
    smt_core_on_output = params.get("smt_core_on_output",
                                    "Number of cores online =")
    smt_threads_per_core_cmd = params.get("smt_threads_per_core_cmd",
                                          "ppc64_cpu --threads-per-core")
    # NOTE(review): the param key below is spelled "ouput" — presumably
    # intentional to match existing cfg files; confirm before changing.
    smt_threads_per_core_output = params.get("smt_threads_per_core_ouput",
                                             "Threads per core:")
    status_error = params.get("status_error", "no") == "yes"
    ignore_status = params.get("ignore_status", "no") == "yes"
    smt_number = params.get("smt_number", None)
    max_vcpu = current_vcpu = int(params.get("smt_smp", 8))
    vm_cores = int(params.get("smt_vcpu_cores", 8))
    vm_threads = int(params.get("smt_vcpu_threads", 1))
    vm_sockets = int(params.get("smt_vcpu_sockets", 1))
    vm = env.get_vm(vm_name)

    # Determine host SMT threads per core; cancel if not obtainable.
    output = astring.to_text(process.system_output(smt_threads_per_core_cmd,
                                                   shell=True))
    try:
        host_threads = int(re.findall('Threads per core:\s+(\d+)',
                                      output)[0])
    except Exception as err:
        test.cancel("Unable to get the host threads\n %s" % err)

    logging.info("Guest: cores:%d, threads:%d, sockets:%d", vm_cores,
                 vm_threads, vm_sockets)

    try:
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Keep a copy so the guest XML can be restored in the finally block.
        org_xml = vmxml.copy()
        vm.destroy()
        # Initial Setup of vm
        vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,
                           vm_sockets, vm_cores, vm_threads,
                           add_topology=True)
        try:
            vm.start()
            if status_error:
                test.fail("VM Started with invalid thread %s" % vm_threads)
        except virt_vm.VMStartError as detail:
            if not status_error:
                test.fail("VM failed to start %s" % detail)
        if not status_error:
            # try installing powerpc-utils in guest if not skip
            try:
                session = vm.wait_for_login()
                utils_package.package_install(["powerpc-utils"], session, 360)
                session.close()
            except Exception as err:
                test.cancel("Unable to install powerpc-utils package in guest\n %s" % err)
            # Changing the smt number
            if smt_number:
                smt_chk_cmd_mod = "%s=%s" % (smt_chk_cmd, smt_number)
                error_count += smt_check(vm, smt_chk_cmd_mod, "")

            guest_cpu_details = cpus_info(vm)
            # Step 10: Check for threads, cores, sockets
            if vm_cores != guest_cpu_details[2]:
                logging.error("Number of cores mismatch:\nExpected number of "
                              "cores: %s\nActual number of cores: %s",
                              vm_cores, guest_cpu_details[2])
                error_count += 1
            if smt_number:
                threads = int(smt_number)
            else:
                threads = vm_threads
            if threads != guest_cpu_details[1]:
                logging.error("Number of threads mismatch:\nExpected number of "
                              "threads: %s\nActual number of threads: %s",
                              threads, guest_cpu_details[1])
                error_count += 1
            if vm_sockets != guest_cpu_details[3]:
                logging.error("Number of sockets mismatch:\nExpected number of "
                              "sockets: %s\nActual number of sockets: %s",
                              vm_sockets, guest_cpu_details[3])
                error_count += 1

            # SMT on/off round trip with matching status messages
            error_count += smt_check(vm, smt_chk_cmd, smt_chk_on_output,
                                     ignorestatus=ignore_status)
            session = vm.wait_for_login()
            session.cmd_output(smt_off_cmd)
            session.close()
            error_count += smt_check(vm, smt_chk_cmd, smt_chk_off_output,
                                     ignorestatus=ignore_status)
            cores = vm_cores * vm_sockets
            extra = " %s" % cores
            error_count += smt_check(vm, smt_core_pst_cmd,
                                     smt_core_pst_output, extra)
            extra = " %s" % cores
            error_count += smt_check(vm, smt_core_on_cmd,
                                     smt_core_on_output, extra)
            extra = " %s" % vm_threads
            error_count += smt_check(vm, smt_threads_per_core_cmd,
                                     smt_threads_per_core_output, extra)

            # Changing the cores
            cores -= 1
            while cores > 1:
                smt_core_on_cmd_mod = "%s=%s" % (smt_core_on_cmd, cores)
                error_count += smt_check(vm, smt_core_on_cmd_mod, "")
                extra = " %s" % cores
                error_count += smt_check(vm, smt_core_on_cmd,
                                         smt_core_on_output, extra)
                guest_cpu_details = cpus_info(vm)
                # lscpu should reflect the new online core count
                if cores != (guest_cpu_details[3] * guest_cpu_details[2]):
                    logging.error("The core changes through command: %s not "
                                  "reflected in lscpu output",
                                  smt_core_on_cmd_mod)
                    error_count += 1
                cores -= 1
                # wait for sometime before next change of cores
                time.sleep(5)

        if error_count > 0:
            test.fail("The SMT feature has issue, please consult "
                      "previous errors more details")
    finally:
        # Restore the original guest definition regardless of outcome.
        org_xml.sync()
def test_basic_functions(self):
    """Exercise the core tree API: repr/str/len/iter, root/parents,
    environment merging rules, leaf ordering and the ascii tree view."""
    # repr
    self.assertEqual("MuxTreeNode(name='hw')", repr(self.tree.children[0]))
    # str
    self.assertEqual(
        "/distro/\u0161mint: init=systemv",
        astring.to_text(self.tree.children[1].children[1]),
    )
    # len
    self.assertEqual(8, len(self.tree))  # number of leaves
    # __iter__
    self.assertEqual(8, sum((1 for _ in self.tree)))  # number of leaves
    # .root
    self.assertEqual(
        id(self.tree), id(self.tree.children[0].children[0].children[0].root)
    )
    # .parents
    self.assertEqual(["hw", ""], self.tree.children[0].children[0].parents)
    # environment / (root)
    self.assertEqual({}, self.tree.environment)
    # environment /hw (nodes first)
    self.assertEqual(
        {"corruptlist": ["upper_node_list"]}, self.tree.children[0].environment
    )
    cpu = self.tree.children[0].children[0]
    # environment /hw/cpu (mixed env)
    self.assertEqual(
        {"corruptlist": ["upper_node_list"], "joinlist": ["first_item"]},
        cpu.environment,
    )
    # environment /hw/cpu/amd (list extension)
    vals = {
        "corruptlist": ["upper_node_list"],
        "cpu_CFLAGS": "-march=athlon64",
        "joinlist": ["first_item", "second", "third"],
    }
    self.assertEqual(vals, cpu.children[1].environment)
    # environment /hw/cpu/arm (deep env)
    vals = {
        "corruptlist": ["upper_node_list"],
        "joinlist": ["first_item"],
        "cpu_CFLAGS": "-mabi=apcs-gnu " "-march=armv8-a -mtune=arm8",
    }
    self.assertEqual(vals, cpu.children[2].environment)
    # environment /hw/disk (list -> string)
    vals = {"corruptlist": "nonlist", "disk_type": "virtio"}
    disk = self.tree.children[0].children[1]
    self.assertEqual(vals, disk.environment)
    # environment /hw/disk/scsi (string -> list)
    vals = {"corruptlist": ["againlist"], "disk_type": "scsi"}
    self.assertEqual(vals, disk.children[0].environment)
    # environment /env
    vals = {"opt_CFLAGS": "-Os"}
    self.assertEqual(vals, self.tree.children[2].environment)
    # leaves order
    leaves = [
        "intel",
        "amd",
        "arm",
        "scsi",
        "virtio",
        "fedora",
        "\u0161mint",
        "prod",
    ]
    self.assertEqual(leaves, self.tree.get_leaves())
    tree_view = tree.tree_view(self.tree, 0, False)
    # ascii treeview contains only ascii chars
    # (decode would raise if any non-ascii byte slipped through)
    tree_view.decode("ascii")
    # ascii treeview contain all leaves
    for leaf in leaves:
        # In ascii mode we replace non-ascii character using
        # xmlcharrefreplace, make sure this is performed
        leaf = leaf.encode("ascii", errors="xmlcharrefreplace")
        self.assertIn(leaf, tree_view, f"Leaf {leaf} not in ascii:\n{tree_view}")
def _log_line(self, line, newline_for_stream='\n'): line = astring.to_text(line, self._result.encoding, 'replace') if self._logger is not None: self._logger.debug(self._logger_prefix, line) if self._stream_logger is not None: self._stream_logger.debug(line + newline_for_stream)
def test_basic_functions(self):
    """Exercise the core tree API: repr/str/len/iter, root/parents,
    environment merging rules, leaf ordering and the ascii tree view."""
    # repr
    self.assertEqual("MuxTreeNode(name='hw')", repr(self.tree.children[0]))
    # str
    self.assertEqual(u"/distro/\u0161mint: init=systemv",
                     astring.to_text(self.tree.children[1].children[1]))
    # len
    self.assertEqual(8, len(self.tree))  # number of leaves
    # __iter__
    self.assertEqual(8, sum((1 for _ in self.tree)))  # number of leaves
    # .root
    self.assertEqual(id(self.tree),
                     id(self.tree.children[0].children[0].children[0].root)
                     )
    # .parents
    self.assertEqual(['hw', ''], self.tree.children[0].children[0].parents)
    # environment / (root)
    self.assertEqual({}, self.tree.environment)
    # environment /hw (nodes first)
    self.assertEqual({'corruptlist': ['upper_node_list']},
                     self.tree.children[0].environment)
    cpu = self.tree.children[0].children[0]
    # environment /hw/cpu (mixed env)
    self.assertEqual({'corruptlist': ['upper_node_list'],
                      'joinlist': ['first_item']},
                     cpu.environment)
    # environment /hw/cpu/amd (list extension)
    vals = {'corruptlist': ['upper_node_list'],
            'cpu_CFLAGS': '-march=athlon64',
            'joinlist': ['first_item', 'second', 'third']}
    self.assertEqual(vals, cpu.children[1].environment)
    # environment /hw/cpu/arm (deep env)
    vals = {'corruptlist': ['upper_node_list'],
            'joinlist': ['first_item'],
            'cpu_CFLAGS': '-mabi=apcs-gnu '
                          '-march=armv8-a -mtune=arm8'}
    self.assertEqual(vals, cpu.children[2].environment)
    # environment /hw/disk (list -> string)
    vals = {'corruptlist': 'nonlist', 'disk_type': 'virtio'}
    disk = self.tree.children[0].children[1]
    self.assertEqual(vals, disk.environment)
    # environment /hw/disk/scsi (string -> list)
    vals = {'corruptlist': ['againlist'], 'disk_type': 'scsi'}
    self.assertEqual(vals, disk.children[0].environment)
    # environment /env
    vals = {'opt_CFLAGS': '-Os'}
    self.assertEqual(vals, self.tree.children[2].environment)
    # leaves order
    leaves = ['intel', 'amd', 'arm', 'scsi', 'virtio', 'fedora',
              u'\u0161mint', 'prod']
    self.assertEqual(leaves, self.tree.get_leaves())
    tree_view = tree.tree_view(self.tree, 0, False)
    # ascii treeview contains only ascii chars
    # (decode would raise if any non-ascii byte slipped through)
    tree_view.decode('ascii')
    # ascii treeview contain all leaves
    for leaf in leaves:
        # In ascii mode we replace non-ascii character using
        # xmlcharrefreplace, make sure this is performed
        leaf = leaf.encode('ascii', errors='xmlcharrefreplace')
        self.assertIn(leaf, tree_view, "Leaf %s not in ascii:\n%s"
                      % (leaf, tree_view))
def test_to_text_decode_is_text(self): self.assertTrue(astring.is_text(astring.to_text(b'', 'ascii'))) self.assertTrue(astring.is_text(astring.to_text('', 'ascii'))) self.assertTrue(astring.is_text(astring.to_text(u'', 'ascii')))
def run(test, params, env):
    """
    Test start domain with nwfilter rules.

    1) Prepare parameters.
    2) Prepare nwfilter rule and update domain interface to apply.
    3) Start domain and check rule.
    4) Clean env

    :param test: test object (provides fail())
    :param params: dictionary with the test parameters
    :param env: dictionary with the test environment
    """
    # Prepare parameters
    filter_name = params.get("filter_name", "testcase")
    attach_option = params.get("attach_option", "")
    check_cmd = params.get("check_cmd")
    expect_match = params.get("expect_match")
    attach_twice_invalid = "yes" == params.get("attach_twice_invalid", "no")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Prepare vm filterref parameters dict list
    filterref_dict = {}
    filterref_dict['name'] = filter_name

    # Prepare interface parameters
    iface_type = 'network'
    iface_source = {'network': 'default'}
    iface_target = params.get("iface_target", 'vnet1')

    # backup vm xml so it can be restored in the finally block
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # Prepare interface xml for attach
        new_iface = interface.Interface(type_name=iface_type)
        new_iface.source = iface_source
        new_iface.target = {'dev': iface_target}
        new_filterref = new_iface.new_filterref(**filterref_dict)
        new_iface.filterref = new_filterref
        new_iface.model = "virtio"
        # NOTE: lazy logging args instead of eager %-formatting
        logging.debug("new interface xml is: %s", new_iface)

        # Attach interface to vm
        ret = virsh.attach_device(vm_name, new_iface.xml,
                                  flagstr=attach_option,
                                  debug=True,
                                  ignore_status=True)
        utlv.check_exit_status(ret, status_error)

        if attach_twice_invalid:
            # second attach of the same device is expected to be rejected
            ret = virsh.attach_device(vm_name, new_iface.xml,
                                      flagstr=attach_option,
                                      debug=True,
                                      ignore_status=True)
            utlv.check_exit_status(ret, status_error)
        if not libvirtd.is_running():
            test.fail("libvirtd not running after attach "
                      "interface.")

        # Check iptables or ebtables on host
        if check_cmd:
            if "DEVNAME" in check_cmd:
                check_cmd = check_cmd.replace("DEVNAME", iface_target)
            # wait until the check command succeeds (exit status 0)
            ret = utils_misc.wait_for(
                lambda: not process.system(check_cmd, ignore_status=True,
                                           shell=True),
                timeout=30)
            if not ret:
                # BUG FIX: message used to read "Rum command"
                test.fail("Run command '%s' failed" % check_cmd)
            out = astring.to_text(
                process.system_output(check_cmd, ignore_status=False,
                                      shell=True))
            if expect_match and not re.search(expect_match, out):
                test.fail("'%s' not found in output: %s"
                          % (expect_match, out))
    finally:
        if attach_twice_invalid:
            libvirtd.restart()
        # Clean env
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Recover xml of vm.
        vmxml_backup.sync()
def run(test, params, env):
    """
    Test DAC setting in both domain xml and qemu.conf.

    (1) Init variables for test.
    (2) Set VM xml and qemu.conf with proper DAC label, also set
        monitor socket parent dir with proper ownership and mode.
    (3) Start VM and check the context.

    :param test: test object (provides fail()/cancel())
    :param params: dictionary with the test parameters
    :param env: dictionary with the test environment
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("vm_sec_type", "dynamic")
    vm_sec_model = params.get("vm_sec_model", "dac")
    vm_sec_label = params.get("vm_sec_label", None)
    vm_sec_relabel = params.get("vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': vm_sec_model,
                'relabel': vm_sec_relabel}
    if vm_sec_label:
        sec_dict['label'] = vm_sec_label
    set_qemu_conf = "yes" == params.get("set_qemu_conf", "no")
    # Get per-img seclabel variables
    disk_type = params.get("disk_type")
    disk_target = params.get('disk_target')
    disk_src_protocol = params.get("disk_source_protocol")
    # NOTE(review): vol_name is read but not used in this function
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    invalid_label = 'yes' == params.get("invalid_label", "no")
    relabel = params.get("per_img_sec_relabel")
    sec_label = params.get("per_img_sec_label")
    per_sec_model = params.get("per_sec_model", 'dac')
    per_img_dict = {'sec_model': per_sec_model, 'relabel': relabel,
                    'sec_label': sec_label}
    params.update(per_img_dict)
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user", 'qemu')
    qemu_group = params.get("qemu_group", 'qemu')
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy so the XML can be restored during cleanup.
    backup_xml = vmxml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    if backup_sestatus == "disabled":
        test.cancel("SELinux is in Disabled "
                    "mode. it must be in Enforcing "
                    "mode to run this test")
    utils_selinux.set_status(host_sestatus)

    qemu_sock_mod = False
    qemu_sock_path = '/var/lib/libvirt/qemu/'
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        if set_qemu_conf:
            # Set qemu.conf for user and group
            if qemu_user:
                qemu_conf.user = qemu_user
            if qemu_group:
                qemu_conf.group = qemu_group
            if dynamic_ownership:
                qemu_conf.dynamic_ownership = 1
            else:
                qemu_conf.dynamic_ownership = 0
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            # qemu.conf changes only take effect after a daemon restart
            libvirtd.restart()
            st = os.stat(qemu_sock_path)
            if not bool(st.st_mode & stat.S_IWGRP):
                # chmod g+w
                os.chmod(qemu_sock_path, st.st_mode | stat.S_IWGRP)
                qemu_sock_mod = True

        # Set the context of the VM.
        logging.debug("sec_dict is %s" % sec_dict)
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()

        # Get per-image seclabel in id string
        if sec_label:
            per_img_usr, per_img_grp = sec_label.split(':')
            sec_label_id = format_user_group_str(per_img_usr, per_img_grp)

        # Start VM to check the qemu process and image.
        try:
            # Set per-img sec context and start vm
            utlv.set_vm_disk(vm, params)
            # Start VM successfully.
            if status_error:
                if invalid_label:
                    # invalid label should fail, more info in bug 1165485
                    logging.debug("The guest failed to start as expected,"
                                  "details see bug: bugzilla.redhat.com/show_bug.cgi"
                                  "?id=1165485")
                else:
                    test.fail("Test succeeded in negative case.")

            # Get vm process label when VM is running.
            vm_pid = vm.get_pid()
            pid_stat = os.stat("/proc/%d" % vm_pid)
            vm_process_uid = pid_stat.st_uid
            vm_process_gid = pid_stat.st_gid
            vm_context = "%s:%s" % (vm_process_uid, vm_process_gid)
            logging.debug("vm process label is: %s", vm_context)

            # Get vm image label when VM is running
            if disk_type != "network":
                disks = vm.get_blk_devices()
                if libvirt_version.version_compare(3, 1, 0) and disk_type == "block":
                    # newer libvirt keeps block disks in the qemu mount
                    # namespace, so stat them via nsenter
                    output = astring.to_text(
                        process.system_output(
                            "nsenter -t %d -m -- ls -l %s" %
                            (vm_pid, disks[disk_target]['source'])))
                    owner, group = output.strip().split()[2:4]
                    disk_context = format_user_group_str(owner, group)
                else:
                    stat_re = os.stat(disks[disk_target]['source'])
                    disk_context = "%s:%s" % (stat_re.st_uid,
                                              stat_re.st_gid)
                logging.debug("The disk dac label after vm start is: %s",
                              disk_context)
                if sec_label and relabel == 'yes':
                    if disk_context != sec_label_id:
                        test.fail("The disk label is not equal to "
                                  "'%s'." % sec_label_id)

        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case."
                          "error: %s" % e)
    finally:
        # clean up
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if qemu_sock_mod:
            # undo the earlier chmod g+w
            st = os.stat(qemu_sock_path)
            os.chmod(qemu_sock_path, st.st_mode ^ stat.S_IWGRP)
        if set_qemu_conf:
            qemu_conf.restore()
            libvirtd.restart()
        utils_selinux.set_status(backup_sestatus)
        if disk_src_protocol == 'iscsi':
            utlv.setup_or_cleanup_iscsi(is_setup=False)
        elif disk_src_protocol == 'gluster':
            gluster.setup_or_cleanup_gluster(False, brick_path=brick_path,
                                             **params)
            libvirtd.restart()
        elif disk_src_protocol == 'netfs':
            utlv.setup_or_cleanup_nfs(is_setup=False,
                                      restore_selinux=backup_sestatus)
def check_result(cmd, result, status_error):
    """
    Check virt-v2v command result

    :param cmd: the virt-v2v command line that was run
    :param result: CmdResult-like object with stdout/stderr of the run
    :param status_error: True when the command was expected to fail

    NOTE(review): relies on many closure variables from the enclosing
    test (checkpoint, output_mode, params, test, ...); confirm scope
    before moving this function.
    """
    utils_v2v.check_exit_status(result, status_error, error_flag)
    output = to_text(result.stdout + result.stderr, errors=error_flag)
    output_stdout = to_text(result.stdout, errors=error_flag)
    if status_error:
        if checkpoint == 'length_of_error':
            log_lines = output.split('\n')
            v2v_start = False
            for line in log_lines:
                if line.startswith('virt-v2v:'):
                    v2v_start = True
                if line.startswith('libvirt:'):
                    v2v_start = False
                # 76 is the max length in v2v
                if v2v_start and len(line) > 76:
                    # NOTE(review): "charactors" typo in this failure
                    # message; kept as-is since other tooling may match it
                    test.fail('Error log longer than 76 charactors: %s' %
                              line)
        if checkpoint == 'disk_not_exist':
            vol_list = virsh.vol_list(pool_name)
            logging.info(vol_list)
            if vm_name in vol_list.stdout:
                test.fail('Disk exists for vm %s' % vm_name)
    else:
        if output_mode == "rhev" and checkpoint != 'quiet':
            ovf = get_ovf_content(output)
            logging.debug("ovf content: %s", ovf)
            check_ovf_snapshot_id(ovf)
            if '--vmtype' in cmd:
                expected_vmtype = re.findall(r"--vmtype\s(\w+)", cmd)[0]
                check_vmtype(ovf, expected_vmtype)
        if '-oa' in cmd and '--no-copy' not in cmd:
            expected_mode = re.findall(r"-oa\s(\w+)", cmd)[0]
            img_path = get_img_path(output)

            def check_alloc():
                """Return True once the image allocation mode matches."""
                try:
                    check_image(img_path, "allocation", expected_mode)
                    return True
                except exceptions.TestFail:
                    pass
            if not utils_misc.wait_for(check_alloc, timeout=600, step=10.0):
                test.fail('Allocation check failed.')
        if '-of' in cmd and '--no-copy' not in cmd and '--print-source' not in cmd and checkpoint != 'quiet' and not no_root:
            expected_format = re.findall(r"-of\s(\w+)", cmd)[0]
            img_path = get_img_path(output)
            check_image(img_path, "format", expected_format)
        if '-on' in cmd:
            expected_name = re.findall(r"-on\s(\w+)", cmd)[0]
            check_new_name(output, expected_name)
        if '--no-copy' in cmd:
            check_nocopy(output)
        if '-oc' in cmd:
            expected_uri = re.findall(r"-oc\s(\S+)", cmd)[0]
            check_connection(output, expected_uri)
        if output_mode == "rhev":
            if not utils_v2v.import_vm_to_ovirt(params, address_cache):
                test.fail("Import VM failed")
            else:
                vmchecker = VMChecker(test, params, env)
                params['vmchecker'] = vmchecker
                params['vmcheck_flag'] = True
        if output_mode == "libvirt":
            if "qemu:///session" not in v2v_options and not no_root:
                virsh.start(vm_name, debug=True, ignore_status=False)
        if checkpoint in ['vmx', 'vmx_ssh']:
            vmchecker = VMChecker(test, params, env)
            params['vmchecker'] = vmchecker
            params['vmcheck_flag'] = True
            ret = vmchecker.run()
            if len(ret) == 0:
                logging.info("All common checkpoints passed")
        if checkpoint == 'quiet':
            if len(output.strip().splitlines()) > 10:
                test.fail('Output is not empty in quiet mode')
        if checkpoint == 'dependency':
            if 'libguestfs-winsupport' not in output:
                test.fail('libguestfs-winsupport not in dependency')
            if all(pkg_pattern not in output for pkg_pattern in ['VMF', 'edk2-ovmf']):
                test.fail('OVMF/AAVMF not in dependency')
            if 'qemu-kvm-rhev' in output:
                test.fail('qemu-kvm-rhev is in dependency')
            if 'libX11' in output:
                test.fail('libX11 is in dependency')
            if 'kernel-rt' in output:
                test.fail('kernel-rt is in dependency')
            win_img = params.get('win_image')
            command = 'guestfish -a %s -i'
            if process.run(command % win_img, ignore_status=True).exit_status == 0:
                # chained %: format the command first, then insert it
                # into the message template
                test.fail('Command "%s" success' % command % win_img)
        if checkpoint == 'no_dcpath':
            if '--dcpath' in output:
                test.fail('"--dcpath" is not removed')
        if checkpoint == 'debug_overlays':
            search = re.search('Overlay saved as(.*)', output)
            if not search:
                test.fail('Not find log of saving overlays')
            overlay_path = search.group(1).strip()
            logging.debug('Overlay file location: %s' % overlay_path)
            if os.path.isfile(overlay_path):
                logging.info('Found overlay file: %s' % overlay_path)
            else:
                test.fail('Overlay file not saved')
        if checkpoint.startswith('empty_nic_source'):
            target_str = '%s "eth0" mac: %s' % (params[checkpoint][0],
                                                params[checkpoint][1])
            logging.info('Expect log: %s', target_str)
            if target_str not in output_stdout.lower():
                test.fail('Expect log not found: %s' % target_str)
        if checkpoint == 'print_source':
            check_source(output_stdout)
        if checkpoint == 'machine_readable':
            if os.path.exists(params.get('example_file', '')):
                # Checking items in example_file exist in latest
                # output regardless of the orders and new items.
                with open(params['example_file']) as f:
                    for line in f:
                        if line.strip() not in output_stdout.strip():
                            test.fail(
                                '%s not in --machine-readable output' %
                                line.strip())
            else:
                test.error('No content to compare with')
        if checkpoint == 'compress':
            img_path = get_img_path(output)
            logging.info('Image path: %s', img_path)

            qemu_img_cmd = 'qemu-img check %s' % img_path
            qemu_img_locking_feature_support = libvirt_storage.check_qemu_image_lock_support()
            if qemu_img_locking_feature_support:
                # -U skips the image lock taken by the running qemu
                qemu_img_cmd = 'qemu-img check %s -U' % img_path

            disk_check = process.run(qemu_img_cmd).stdout_text
            logging.info(disk_check)
            compress_info = disk_check.split(',')[-1].split('%')[0].strip()
            compress_rate = float(compress_info)
            logging.info('%s%% compressed', compress_rate)
            if compress_rate < 0.1:
                test.fail('Disk image NOT compressed')
        if checkpoint == 'tail_log':
            messages = params['tail'].get_output()
            logging.info('Content of /var/log/messages during conversion:')
            logging.info(messages)
            msg_content = params['msg_content']
            if msg_content in messages:
                test.fail('Found "%s" in /var/log/messages' % msg_content)
        if checkpoint == 'print_estimate_tofile':
            check_print_estimate(estimate_file)
    # common to success and failure paths: scan the v2v log for
    # configured patterns and verify the man page expectations
    log_check = utils_v2v.check_log(params, output)
    if log_check:
        test.fail(log_check)
    check_man_page(params.get('in_man'), params.get('not_in_man'))
def _escape_attr(self, attrib): attrib = ''.join(_ if _ in self.PRINTABLE else "\\x%02x" % ord(_) for _ in astring.to_text(attrib, encoding='utf-8')) return attrib
def test_basic_functions(self):
    """Exercise the core tree API: repr/str/len/iter, root/parents,
    environment merging rules, leaf ordering and the ascii tree view."""
    # repr
    self.assertEqual("MuxTreeNode(name='hw')", repr(self.tree.children[0]))
    # str
    self.assertEqual(u"/distro/\u0161mint: init=systemv",
                     astring.to_text(self.tree.children[1].children[1]))
    # len
    self.assertEqual(8, len(self.tree))  # number of leaves
    # __iter__
    self.assertEqual(8, sum((1 for _ in self.tree)))  # number of leaves
    # .root
    self.assertEqual(
        id(self.tree),
        id(self.tree.children[0].children[0].children[0].root))
    # .parents
    self.assertEqual(['hw', ''], self.tree.children[0].children[0].parents)
    # environment / (root)
    self.assertEqual({}, self.tree.environment)
    # environment /hw (nodes first)
    self.assertEqual({'corruptlist': ['upper_node_list']},
                     self.tree.children[0].environment)
    cpu = self.tree.children[0].children[0]
    # environment /hw/cpu (mixed env)
    self.assertEqual(
        {
            'corruptlist': ['upper_node_list'],
            'joinlist': ['first_item']
        }, cpu.environment)
    # environment /hw/cpu/amd (list extension)
    vals = {
        'corruptlist': ['upper_node_list'],
        'cpu_CFLAGS': '-march=athlon64',
        'joinlist': ['first_item', 'second', 'third']
    }
    self.assertEqual(vals, cpu.children[1].environment)
    # environment /hw/cpu/arm (deep env)
    vals = {
        'corruptlist': ['upper_node_list'],
        'joinlist': ['first_item'],
        'cpu_CFLAGS': '-mabi=apcs-gnu '
                      '-march=armv8-a -mtune=arm8'
    }
    self.assertEqual(vals, cpu.children[2].environment)
    # environment /hw/disk (list -> string)
    vals = {'corruptlist': 'nonlist', 'disk_type': 'virtio'}
    disk = self.tree.children[0].children[1]
    self.assertEqual(vals, disk.environment)
    # environment /hw/disk/scsi (string -> list)
    vals = {'corruptlist': ['againlist'], 'disk_type': 'scsi'}
    self.assertEqual(vals, disk.children[0].environment)
    # environment /env
    vals = {'opt_CFLAGS': '-Os'}
    self.assertEqual(vals, self.tree.children[2].environment)
    # leaves order
    leaves = [
        'intel', 'amd', 'arm', 'scsi', 'virtio', 'fedora', u'\u0161mint',
        'prod'
    ]
    self.assertEqual(leaves, self.tree.get_leaves())
    tree_view = tree.tree_view(self.tree, 0, False)
    # ascii treeview contains only ascii chars
    # (decode would raise if any non-ascii byte slipped through)
    tree_view.decode('ascii')
    # ascii treeview contain all leaves
    for leaf in leaves:
        # In ascii mode we replace non-ascii character using
        # xmlcharrefreplace, make sure this is performed
        leaf = leaf.encode('ascii', errors='xmlcharrefreplace')
        self.assertIn(leaf, tree_view,
                      "Leaf %s not in ascii:\n%s" % (leaf, tree_view))
def tests(self): mapping = {"SKIP": "warning", "ABORT": "danger", "ERROR": "danger", "FAIL": "danger", "WARN": "warning", "PASS": "******", "START": "info", "ALERT": "danger", "RUNNING": "info", "NOSTATUS": "info", "INTERRUPTED": "danger", "CANCEL": "warning"} test_info = [] results_dir = self.results_dir(False) for tst in self.result.tests: formatted = {} formatted['uid'] = tst['name'].uid formatted['name'] = tst['name'].name if 'params' in tst: params = '' try: parameters = 'Params:\n' for path, key, value in tst['params']: parameters += ' %s:%s => %s\n' % (path, key, value) except KeyError: pass else: params = parameters else: params = "No params" formatted['params'] = params formatted['variant'] = tst['name'].variant or '' formatted['status'] = tst['status'] logdir = os.path.join(results_dir, 'test-results', tst['logdir']) formatted['logdir'] = os.path.relpath(logdir, self.html_output_dir) logfile = os.path.join(logdir, 'debug.log') formatted['logfile'] = os.path.relpath(logfile, self.html_output_dir) formatted['logfile_basename'] = os.path.basename(logfile) formatted['time'] = "%.2f" % tst['time_elapsed'] local_time_start = time.localtime(tst['time_start']) formatted['time_start'] = time.strftime("%Y-%m-%d %H:%M:%S", local_time_start) formatted['row_class'] = mapping[tst['status']] exhibition_limit = 40 fail_reason = tst.get('fail_reason') if fail_reason is None: fail_reason = '<unknown>' fail_reason = astring.to_text(fail_reason) if len(fail_reason) > exhibition_limit: fail_reason = ('<a data-container="body" ' 'data-toggle="popover" ' 'data-placement="top" ' 'title="Error Details" ' 'data-content="%s">%s...</a>' % (fail_reason, fail_reason[:exhibition_limit])) formatted['fail_reason'] = fail_reason test_info.append(formatted) return test_info