async def _check_requirement_core_avocado():
    """Check that the configured podman image can run the avocado command.

    Runs ``avocado --version`` inside the image configured at
    ``spawner.podman.image``.  A positive result is memoized in the
    requirements cache so the container does not have to be started again.

    :returns: True when avocado is usable inside the image, False otherwise
    """
    env_type = 'podman'
    env = settings.as_dict().get('spawner.podman.image')
    req_type = 'core'
    req = 'avocado'
    # A previously cached positive check short-circuits the expensive
    # container invocation below.
    on_cache = cache.get_requirement(env_type, env, req_type, req)
    if on_cache:
        return True
    podman_bin = settings.as_dict().get('spawner.podman.bin')
    try:
        # pylint: disable=E1133
        proc = await asyncio.create_subprocess_exec(
            podman_bin, "run", env, "avocado", "--version",
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)
    except (FileNotFoundError, PermissionError):
        # podman binary missing or not executable
        return False
    await proc.wait()
    if proc.returncode != 0:
        return False
    stdout = await proc.stdout.read()
    # Only a well-formed "Avocado <major>.<minor>" banner counts as success;
    # \r? tolerates carriage returns in the captured output.
    if re.match(rb'^Avocado (\d+)\.(\d+)\r?$', stdout):
        # Note: only positive results are cached.
        cache.set_requirement(env_type, env, req_type, req)
        return True
    return False
def fetch_assets(test_file, klass=None, method=None, logger=None):
    """Fetches the assets based on keywords listed on FetchAssetHandler.calls.

    :param test_file: File name of instrumented test to be evaluated
    :type test_file: str
    :returns: list of names that were successfully fetched and list of fails.
    """
    cache_dirs = settings.as_dict().get('datadir.paths.cache_dirs')
    timeout = settings.as_dict().get('assets.fetch.timeout')
    fetched = []
    failures = []
    for spec in FetchAssetHandler(test_file, klass, method).calls:
        # "expire" is consumed here and translated to seconds for Asset
        expire = spec.pop('expire', None)
        if expire is not None:
            expire = data_structures.time_to_seconds(str(expire))
        try:
            asset = Asset(**spec, cache_dirs=cache_dirs, expire=expire)
            if logger is not None:
                logger.info('Fetching asset from %s:%s.%s',
                            test_file, klass, method)
            asset.fetch(timeout)
            fetched.append(spec['name'])
        except (OSError, ValueError) as error:
            # record the exception itself so callers can inspect the cause
            failures.append(error)
    return fetched, failures
def unittest(config, tag=""):
    """
    Perform self testing for sanity and test result validation.

    :param config: command line arguments and run configuration
    :type config: {str, str}
    :param str tag: extra name identifier for the test to be run
    """
    import unittest
    util_unittests = unittest.TestSuite()
    util_testrunner = unittest.TextTestRunner(stream=sys.stdout, verbosity=2)
    root_path = settings.as_dict().get('i2n.common.suite_path')
    subtests_filter = config["tests_params"].get("ut_filter", "*_unittest.py")
    # Discover unit tests from each suite location; the loop replaces the
    # previously copy-pasted discovery code for "utils" and "tools".
    for subdir in ("utils", "tools"):
        subtests_path = os.path.join(root_path, subdir)
        subtests_suite = unittest.defaultTestLoader.discover(
            subtests_path,
            pattern=subtests_filter,
            top_level_dir=subtests_path)
        util_unittests.addTest(subtests_suite)
    util_testrunner.run(util_unittests)
def download_image(distro, version=None, arch=None):
    """
    Downloads the vmimage to the cache directory if doesn't already exist

    :param distro: Name of image distribution
    :type distro: str
    :param version: Version of image
    :type version: str
    :param arch: Architecture of image
    :type arch: str
    :raise AttributeError: When image can't be downloaded
    :return: Information about downloaded image
    :rtype: dict
    """
    # the first configured cache dir is the canonical download location
    cache_dir = settings.as_dict().get('datadir.paths.cache_dirs')[0]
    image_info = vmimage.get(name=distro, version=version, arch=arch,
                             cache_dir=cache_dir)
    return {
        'name': distro,
        'version': image_info.version,
        'arch': image_info.arch,
        'file': image_info.base_image,
    }
def __init__(self, name, config=None, tests=None, job_config=None,
             resolutions=None, enabled=True):
    """Build a test suite from pre-resolved tests and layered configuration.

    :param name: name of this test suite
    :param config: suite-specific config values (highest precedence)
    :param tests: pre-populated list of tests for this suite
    :param job_config: job-wide config dict, applied before ``config``
    :param resolutions: reference resolutions that produced the tests
    :param enabled: whether this suite should be run
    """
    self.name = name
    self.tests = tests
    self.resolutions = resolutions
    self.enabled = enabled
    # Create a complete config dict with all registered options + custom
    # config
    self.config = settings.as_dict()
    if job_config:
        self.config.update(job_config)
    if config:
        self.config.update(config)
    self._variants = None
    self._references = None
    self._runner = None
    self._test_parameters = None
    # test parameters and variants are mutually exclusive ways to vary tests
    self._check_both_parameters_and_variants()
    if self.config.get('run.dry_run.enabled'):
        self._convert_to_dry_run()
    # NOTE(review): as visible here this trailing guard is a no-op ("return"
    # at the end of __init__) — confirm whether more initialization follows
    # in the full file.
    if self.size == 0:
        return
def python_resolver(name, reference, find_tests):
    """Resolve a python test reference into a list of runnables.

    The reference may carry an optional filter part; only class.method
    names matching that filter are turned into runnables.
    """
    module_path, tests_filter = reference_split(reference)
    if tests_filter is not None:
        tests_filter = re.compile(tests_filter)
    criteria_check = check_file(module_path, reference)
    if criteria_check is not True:
        return criteria_check
    # disabled tests not needed here
    class_methods_info, _ = find_tests(module_path)
    runnables = []
    for klass, entries in class_methods_info.items():
        for method, tags, reqs in entries:
            candidate = f"{klass}.{method}"
            if tests_filter is not None and not tests_filter.search(candidate):
                continue
            runnables.append(
                Runnable(name,
                         uri=f"{module_path}:{candidate}",
                         tags=tags,
                         requirements=reqs,
                         config=settings.as_dict(r'^runner\.')))
    if not runnables:
        return ReferenceResolution(reference,
                                   ReferenceResolutionResult.NOTFOUND)
    return ReferenceResolution(reference,
                               ReferenceResolutionResult.SUCCESS,
                               runnables)
def __init__(self):
    """Map test statuses to terminal colors and decide if color is usable."""
    status_colors = {
        'HEADER': self.COLOR_BLUE,
        'PASS': self.COLOR_GREEN,
        'SKIP': self.COLOR_YELLOW,
        'FAIL': self.COLOR_RED,
        'INTERRUPT': self.COLOR_RED,
        'ERROR': self.COLOR_RED,
        'WARN': self.COLOR_YELLOW,
        'CANCEL': self.COLOR_YELLOW,
        'PARTIAL': self.COLOR_YELLOW,
        'ENDC': self.CONTROL_END,
        'LOWLIGHT': self.COLOR_DARKGREY,
    }
    for attr, color in status_colors.items():
        setattr(self, attr, color)
    self.enabled = True
    # terminals known to render ANSI color escapes correctly
    allowed_terms = ['linux', 'xterm', 'xterm-256color', 'vt100', 'screen',
                     'screen-256color', 'screen.xterm-256color']
    term = os.environ.get("TERM")
    config = settings.as_dict()
    colored = config.get('runner.output.colored')
    force_color = config.get('runner.output.color')
    if force_color == "never":
        self.disable()
    elif force_color == "auto":
        # auto: only colorize when enabled, on a tty, and a known terminal
        if not colored or not os.isatty(1) or term not in allowed_terms:
            self.disable()
    elif force_color != "always":
        raise ValueError("The value for runner.output.color must be one of "
                         "'always', 'never', 'auto' and not " + force_color)
def load_addons_tools():
    """Load all custom manual steps defined in the test suite tools folder.

    Each ``*.py`` module in the tools folder (except unit tests) is imported
    and its ``__all__`` names are re-exported from this module.  Modules that
    fail to import or lack ``__all__`` are skipped with a log message.
    """
    suite_path = settings.as_dict().get('i2n.common.suite_path')
    tools_path = os.path.join(suite_path, "tools")
    sys.path.append(tools_path)
    # we have no other choice to avoid loading at intertest import
    global __all__
    for tool in os.listdir(tools_path):
        # only plain tool modules, skipping their unit tests
        if tool.endswith(".py") and not tool.endswith("_unittest.py"):
            module_name = tool.replace(".py", "")
            logging.debug("Loading tools in %s", module_name)
            try:
                module = importlib.import_module(module_name)
            except Exception as error:
                # best-effort: one broken tool must not block the others
                logging.error("Could not load tool %s: %s", module_name, error)
                continue
            if "__all__" not in module.__dict__:
                logging.warning("Detected tool module doesn't contain publicly defined tools")
                continue
            # re-export the tool's public names from this module
            names = module.__dict__["__all__"]
            globals().update({k: getattr(module, k) for k in names})
            __all__ += module.__all__
            logging.info("Loaded custom tools: %s", ", ".join(module.__all__))
def filter_runnable_config(kind, config):
    """
    Returns only essential values for specific runner.

    It will use configuration from argument completed by values from
    config file and avocado default configuration.

    :param kind: Kind of runner which should use the configuration.
    :type kind: str
    :param config: Configuration values for runner. If some values will be
                   missing the default ones and from config file will be
                   used.
    :type config: dict
    :returns: Config dict, which has only values essential for runner
              based on STANDALONE_EXECUTABLE_CONFIG_USED
    :rtype: dict
    """
    command = Runnable.pick_runner_command(kind)
    if command is None:
        raise ValueError(f"Unsupported kind of runnable: {kind}")
    # layer: avocado defaults/config file first, then the given values
    whole_config = settings.as_dict()
    whole_config.update(config)
    command_key = " ".join(command)
    configuration_used = STANDALONE_EXECUTABLE_CONFIG_USED.get(command_key)
    return {item: whole_config.get(item) for item in configuration_used}
def _get_settings_dir(dir_name):
    """
    Returns a given "datadir" directory as set by the configuration system
    """
    key = 'datadir.paths.{}'.format(dir_name)
    return os.path.abspath(settings.as_dict().get(key))
def _get_reference_resolution(self, reference):
    """Resolve a single test reference through the cartesian parser.

    :param reference: cartesian "only" filter selecting tests; an empty
                      string selects everything
    :returns: a ReferenceResolution with SUCCESS, NOTFOUND or ERROR status
    """
    self.config = settings.as_dict()
    try:
        cartesian_parser = self._get_parser()
        self._save_parser_cartesian_config(cartesian_parser)
    except Exception as details:
        # parser construction can fail for many reasons; report them all
        return ReferenceResolution(reference,
                                   ReferenceResolutionResult.ERROR,
                                   info=details)
    if reference != '':
        cartesian_parser.only_filter(reference)
    # one runnable per variant dict produced by the cartesian config
    runnables = [self._parameters_to_runnable(d)
                 for d in cartesian_parser.get_dicts()]
    if runnables:
        warnings.warn("The VT NextRunner is experimental and doesn't have "
                      "current Avocado VT features")
        if self.config.get("nrunner.max_parallel_tasks", 1) != 1:
            warnings.warn("The VT NextRunner can be run only with "
                          "nrunner-max-parallel-tasks set to 1")
        return ReferenceResolution(reference,
                                   ReferenceResolutionResult.SUCCESS,
                                   runnables)
    else:
        return ReferenceResolution(reference,
                                   ReferenceResolutionResult.NOTFOUND)
def discover(self, reference, which_tests=loader.DiscoverMode.DEFAULT):
    """Discover GLib-style tests from an executable test binary reference.

    The reference may carry a ":filter" suffix restricting subtests.
    Listing tests requires executing the binary ("-l"), which is only done
    when the 'plugins.glib.unsafe' setting allows it.
    """
    avocado_suite = []
    subtests_filter = None
    unsafe = settings.as_dict().get('plugins.glib.unsafe')
    if reference is None:
        return []
    if ':' in reference:
        # split off an optional subtest filter from the reference
        reference, _subtests_filter = reference.split(':', 1)
        subtests_filter = re.compile(_subtests_filter)
    if (os.path.isfile(reference) and
            path.PathInspector(reference).has_exec_permission() and
            unsafe):
        try:
            # ask the binary itself for its test list
            cmd = '%s -l' % (reference)
            result = process.run(cmd)
        except Exception as details:  # pylint: disable=W0703
            if which_tests == loader.DiscoverMode.ALL:
                return [(NotGLibTest,
                         {"name": "%s: %s" % (reference, details)})]
            return []
        for test_item in result.stdout_text.splitlines():
            test_name = "%s:%s" % (reference, test_item)
            if subtests_filter and not subtests_filter.search(test_name):
                continue
            avocado_suite.append((GLibTest, {'name': test_name,
                                             'executable': test_name}))
    # in ALL mode an empty result is reported as a "not a GLib test" entry
    if which_tests == loader.DiscoverMode.ALL and not avocado_suite:
        return [(NotGLibTest,
                 {"name": "%s: No GLib-like tests found" % reference})]
    return avocado_suite
def handle_default():
    """Print the config files read and either all key/values or datadirs."""
    LOG_UI.info("Config files read (in order, '*' means the file exists "
                "and had been read):")
    # Getting from settings because is already sorted
    config = settings.as_dict()
    for cfg_path in settings.all_config_paths:
        if cfg_path in settings.config_paths:
            LOG_UI.debug(' * %s', cfg_path)
        else:
            LOG_UI.debug(' %s', cfg_path)
    LOG_UI.debug("")
    if not config.get('config.datadir'):
        # pad the namespace column to the widest key
        blength = max(map(len, config), default=0)
        format_str = " %-" + str(blength) + "s %s"
        LOG_UI.debug(format_str, 'Section.Key', 'Value')
        for namespace, value in config.items():
            LOG_UI.debug(format_str, namespace, value)
    else:
        LOG_UI.debug("Avocado replaces config dirs that can't be accessed")
        LOG_UI.debug("with sensible defaults. Please edit your local config")
        LOG_UI.debug("file to customize values")
        LOG_UI.debug('')
        LOG_UI.info('Avocado Data Directories:')
        LOG_UI.debug('    base     %s', data_dir.get_base_dir())
        LOG_UI.debug('    tests    %s', data_dir.get_test_dir())
        LOG_UI.debug('    data     %s', data_dir.get_data_dir())
        LOG_UI.debug('    logs     %s', data_dir.get_logs_dir())
        LOG_UI.debug('    cache    %s', ", ".join(data_dir.get_cache_dirs()))
def run(self, runnable):  # pylint: disable=W0201
    """Fetch the asset described by the runnable's kwargs, yielding statuses.

    The fetch happens in a separate process so this generator can keep
    emitting periodic "running" status messages while the download runs.
    """
    self.runnable = runnable
    yield self.prepare_status("started")
    name = self.runnable.kwargs.get("name")
    # if name was passed correctly, run the Avocado Asset utility
    if name is not None:
        asset_hash = self.runnable.kwargs.get("asset_hash")
        algorithm = self.runnable.kwargs.get("algorithm")
        locations = self.runnable.kwargs.get("locations")
        expire = self.runnable.kwargs.get("expire")
        if expire is not None:
            expire = data_structures.time_to_seconds(str(expire))
        cache_dirs = self.runnable.config.get("datadir.paths.cache_dirs")
        if cache_dirs is None:
            # fall back to global settings when the runnable's own config
            # does not carry the cache dirs
            cache_dirs = settings.as_dict().get("datadir.paths.cache_dirs")
        # let's spawn it to another process to be able to update the
        # status messages and avoid the Asset to lock this process
        queue = SimpleQueue()
        process = Process(
            target=self._fetch_asset,
            args=(
                name,
                asset_hash,
                algorithm,
                locations,
                cache_dirs,
                expire,
                queue,
            ),
        )
        process.start()
        # poll until the worker posts its result, emitting heartbeats
        while queue.empty():
            time.sleep(RUNNER_RUN_STATUS_INTERVAL)
            yield self.prepare_status("running")
        output = queue.get()
        result = output["result"]
        stdout = output["stdout"]
        stderr = output["stderr"]
    else:
        # Otherwise, log the missing package name
        result = "error"
        stdout = ""
        stderr = 'At least name should be passed as kwargs using name="uri".'
    yield self.prepare_status("running", {
        "type": "stdout",
        "log": stdout.encode()
    })
    yield self.prepare_status("running", {
        "type": "stderr",
        "log": stderr.encode()
    })
    yield self.prepare_status("finished", {"result": result})
def discover(self):
    """It will discover vt test resolutions from cartesian config.

    Discovery is skipped unless either a vt config file or the list
    resolver option is set.
    """
    self.config = settings.as_dict()
    if (not get_opt(self.config, 'vt.config')
            and not get_opt(self.config, 'list.resolver')):
        # NOTE(review): this branch returns a bare ReferenceResolution while
        # the success branch returns a list of them — confirm callers
        # handle both shapes.
        return ReferenceResolution('',
                                   ReferenceResolutionResult.NOTFOUND)
    return [self._get_reference_resolution('')]
def get_datafile_path(*args):
    """
    Get a path relative to the data dir.

    :param args: Arguments passed to os.path.join. Ex ('images', 'jeos.qcow2')
    """
    data_dir = settings.as_dict().get('datadir.paths.data_dir')
    return os.path.join(data_dir, *args)
def main():
    """Download every known image into the first avocado cache directory."""
    for name, version, arch, checksum, algorithm in KNOWN_IMAGES:
        print(f"{name} version {version} ({arch}): ", end='')
        cache_dir = settings.as_dict().get('datadir.paths.cache_dirs')[0]
        downloaded = vmimage.get(name=name, version=version, arch=arch,
                                 checksum=checksum, algorithm=algorithm,
                                 cache_dir=cache_dir)
        print(downloaded.base_image)
def enabled(self, extension):
    """
    Checks configuration for explicit mention of plugin in a disable list

    If configuration section or key doesn't exist, it means no plugin
    is disabled.
    """
    # A missing 'plugins.disable' entry yields None, which would make the
    # membership test below raise TypeError; treat it as "nothing disabled"
    # to match the documented contract.
    disabled = settings.as_dict().get('plugins.disable') or ()
    return self.fully_qualified_name(extension) not in disabled
def get_crash_dir():
    """Return (creating it best-effort if needed) the crash dump directory."""
    data_dir = settings.as_dict().get('datadir.paths.data_dir')
    crash_dir_path = os.path.join(data_dir, "crashes")
    try:
        os.makedirs(crash_dir_path)
    except OSError:
        # best-effort: the directory most likely already exists
        pass
    return crash_dir_path
def get_cache_dirs():
    """
    Returns the list of cache dirs, according to configuration and convention.

    This will be deprecated. Please use settings.as_dict() or self.config.

    .. warning:: This method is deprecated, get values from
                 settings.as_dict() or self.config
    """
    message = ("get_cache_dirs() is deprecated, get values from "
               "settings.as_dict() or self.config")
    warnings.warn(message, DeprecationWarning)
    return settings.as_dict().get('datadir.paths.cache_dirs')
def get_logs_dir():
    """
    Get the most appropriate log dir location.

    The log dir is where we store job/test logs in general.

    .. warning:: This method is deprecated, get values from
                 settings.as_dict() or self.config
    """
    message = ("get_logs_dir() is deprecated, get values from "
               "settings.as_dict() or self.config")
    warnings.warn(message, DeprecationWarning)
    return settings.as_dict().get('datadir.paths.logs_dir')
async def create_server(self):
    """Start the status server on either a TCP or a UNIX socket.

    The URI is interpreted as "host:port" when it contains a colon,
    otherwise as a filesystem path for a UNIX domain socket.
    """
    limit = settings.as_dict().get("nrunner.status_server_buffer_size")
    if ":" in self._uri:
        # split on the LAST colon so hosts containing colons themselves
        # (e.g. IPv6 literals) don't break the unpacking below
        host, port = self._uri.rsplit(":", 1)
        port = int(port)
        self._server_task = await asyncio.start_server(
            self.cb, host=host, port=port, limit=limit
        )
    else:
        self._server_task = await asyncio.start_unix_server(
            self.cb, path=self._uri, limit=limit
        )
def is_task_alive(runtime_task):
    """Tell whether the podman container behind a runtime task is running."""
    if runtime_task.spawner_handle is None:
        return False
    podman_bin = settings.as_dict().get('spawner.podman.bin')
    cmd = [podman_bin,
           "ps",
           "--all",
           "--format={{.State}}",
           "--filter=id=%s" % runtime_task.spawner_handle]
    with subprocess.Popen(cmd,
                          stdin=subprocess.DEVNULL,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.DEVNULL) as proc:
        out, _ = proc.communicate()
    # FIXME: check how podman 2.x is reporting valid "OK" states
    return out.startswith(b'Up ')
def __init__(self, namespace, invoke_kwds=None):
    """Initialize the manager and reorder extensions per configuration."""
    super().__init__(namespace, invoke_kwds)
    order_key = "%s.order" % self.settings_section()
    configured_order = settings.as_dict().get(order_key)
    if configured_order:
        # extensions named in the configured order come first, in that
        # order; everyone else follows in their original order
        head = [ext for name in configured_order
                for ext in self.extensions if ext.name == name]
        tail = [ext for ext in self.extensions if ext not in head]
        self.extensions = head + tail
def run(self, runnable):  # pylint: disable=W0201
    """Fetch the asset described by the runnable's kwargs, yielding statuses.

    The fetch happens in a separate process so this generator can keep
    emitting periodic "running" status messages while the download runs.
    """
    self.runnable = runnable
    yield self.prepare_status('started')
    name = self.runnable.kwargs.get('name')
    # if name was passed correctly, run the Avocado Asset utility
    if name is not None:
        asset_hash = self.runnable.kwargs.get('asset_hash')
        algorithm = self.runnable.kwargs.get('algorithm')
        locations = self.runnable.kwargs.get('locations')
        expire = self.runnable.kwargs.get('expire')
        if expire is not None:
            expire = data_structures.time_to_seconds(str(expire))
        cache_dirs = self.runnable.config.get('datadir.paths.cache_dirs')
        if cache_dirs is None:
            # fall back to global settings when the runnable's own config
            # does not carry the cache dirs
            cache_dirs = settings.as_dict().get('datadir.paths.cache_dirs')
        # let's spawn it to another process to be able to update the
        # status messages and avoid the Asset to lock this process
        queue = SimpleQueue()
        process = Process(target=self._fetch_asset,
                          args=(name, asset_hash, algorithm, locations,
                                cache_dirs, expire, queue))
        process.start()
        # poll until the worker posts its result, emitting heartbeats
        while queue.empty():
            time.sleep(RUNNER_RUN_STATUS_INTERVAL)
            yield self.prepare_status('running')
        output = queue.get()
        result = output['result']
        stdout = output['stdout']
        stderr = output['stderr']
    else:
        # Otherwise, log the missing package name
        result = 'error'
        stdout = ''
        stderr = ('At least name should be passed as kwargs using'
                  ' name="uri".')
    yield self.prepare_status('running',
                              {'type': 'stdout',
                               'log': stdout.encode()})
    yield self.prepare_status('running',
                              {'type': 'stderr',
                               'log': stderr.encode()})
    yield self.prepare_status('finished', {'result': result})
def get_base_dir():
    """
    Get the most appropriate base dir.

    The base dir is the parent location for most of the avocado other
    important directories.

    Examples:
        * Log directory
        * Data directory
        * Tests directory

    .. warning:: This method is deprecated, get values from
                 settings.as_dict() or self.config
    """
    message = ("get_base_dir() is deprecated, get values from "
               "settings.as_dict() or self.config")
    warnings.warn(message, DeprecationWarning)
    return settings.as_dict().get('datadir.paths.base_dir')
def resolve(reference):
    """Resolve an executable-file reference into a single TAP runnable."""
    criteria_check = check_file(reference, reference, suffix=None,
                                type_name='executable file',
                                access_check=os.R_OK | os.X_OK,
                                access_name='executable')
    if criteria_check is not True:
        return criteria_check
    runnable = Runnable('tap',
                        reference,
                        config=settings.as_dict(r'^runner\.'))
    return ReferenceResolution(reference,
                               ReferenceResolutionResult.SUCCESS,
                               [runnable])
def list_downloaded_images():
    """
    List the available Image inside avocado cache

    :return: list with image's parameters
    :rtype: list of dicts
    """
    images = []
    for cache_dir in settings.as_dict().get("datadir.paths.cache_dirs"):
        for root, _, files in os.walk(cache_dir):
            if files:
                # "*_metadata.json" files describe downloads; every other
                # file is a candidate image file
                metadata_files = [
                    pos_json for pos_json in files
                    if pos_json.endswith("_metadata.json")
                ]
                files = list(set(files) - set(metadata_files))
                for metadata_file in metadata_files:
                    with open(os.path.join(root, metadata_file), "r",
                              encoding="utf-8") as data:
                        metadata = json.loads(data.read())
                    if isinstance(metadata, dict):
                        if metadata.get("type", None) == "vmimage":
                            # find the provider that produced this metadata
                            provider = None
                            for p in vmimage.IMAGE_PROVIDERS:
                                if p.name == metadata["name"]:
                                    provider = p(
                                        metadata["version"],
                                        metadata["build"],
                                        metadata["arch"],
                                    )
                                    break
                            if provider is not None:
                                # match the image file by the provider's
                                # expected file name pattern
                                for image in files:
                                    if re.match(provider.file_name, image):
                                        data = {
                                            "name": provider.name,
                                            "version": provider.version,
                                            "arch": provider.arch,
                                            "file": os.path.join(root, image),
                                        }
                                        images.append(data)
                                        break
    return images
def get_data_dir():
    """
    Get the most appropriate data dir location.

    The data dir is the location where any data necessary to job and test
    operations are located.

    Examples:
        * ISO files
        * GPG files
        * VM images
        * Reference bitmaps

    .. warning:: This method is deprecated, get values from
                 settings.as_dict() or self.config
    """
    message = ("get_data_dir() is deprecated, get values from "
               "settings.as_dict() or self.config")
    warnings.warn(message, DeprecationWarning)
    return settings.as_dict().get('datadir.paths.data_dir')
def from_config(cls, config, name=None, job_config=None):
    """Helper method to create a TestSuite from config dicts.

    This is different from the TestSuite() initialization because here we
    are assuming that you need some help to build the test suite. Avocado
    will try to resolve tests based on the configuration information
    instead of assuming pre populated tests.

    If you need to create a custom TestSuite, please use the TestSuite()
    constructor instead of this method.

    :param config: A config dict to be used on the desired test suite.
    :type config: dict
    :param name: The name of the test suite. This is optional and default
                 is a random uuid.
    :type name: str
    :param job_config: The job config dict (a global config). Use this to
                       avoid huge configs per test suite. This is also
                       optional.
    :type job_config: dict
    """
    # layer configs: registered defaults < job-wide < suite-specific
    merged = settings.as_dict()
    if job_config:
        merged.update(job_config)
    merged.update(config)
    runner = merged.get("run.test_runner")
    if runner != "nrunner":
        raise TestSuiteError(
            f'Suite creation for runner "{runner}" ' f"is not supported"
        )
    suite = cls._from_config_with_resolver(merged, name)
    if not merged.get("run.ignore_missing_references"):
        if not suite.tests:
            msg = (
                "Test Suite could not be created. No test references "
                "provided nor any other arguments resolved into tests"
            )
            raise TestSuiteError(msg)
    return suite