def _save_to_json(matrix, filename, verbose=False):
    result = []
    type_label_mapping = {}
    for line in matrix:
        try:
            test_type = type_label_mapping[line[0]]
        except KeyError:
            test_type = line[0]
        if verbose:
            tags = line[2] or {}
            result.append(
                {
                    "Type": test_type,
                    "Test": line[1],
                    "Tags": {k: list(v or {}) for k, v in tags.items()},
                }
            )
        else:
            result.append({"Type": test_type, "Test": line[1]})
    if filename == "-":
        LOG_UI.debug(json.dumps(result, indent=4))
    else:
        with open(filename, "w", encoding="utf-8") as fp:
            json.dump(result, fp, indent=4)
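# A minimal usage sketch for _save_to_json above (the sample row is made
# up): each matrix row is a (type, name, tags) triple, matching how the
# function indexes line[0], line[1] and line[2]; passing "-" as the
# filename routes the JSON to the UI log instead of a file.
sample_matrix = [
    ("avocado-instrumented", "examples/tests/passtest.py:PassTest.test",
     {"speed": {"fast"}}),
]
_save_to_json(sample_matrix, "-", verbose=True)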
def _display(self, suite, matrix, resolution=None):
    header = None
    verbose = suite.config.get('core.verbose')
    if verbose:
        header = (TERM_SUPPORT.header_str('Type'),
                  TERM_SUPPORT.header_str('Test'),
                  TERM_SUPPORT.header_str('Tag(s)'))
    for line in iter_tabular_output(matrix, header=header, strip=True):
        LOG_UI.debug(line)
    if verbose:
        LOG_UI.info("")
        LOG_UI.info("TEST TYPES SUMMARY")
        LOG_UI.info("==================")
        for key in sorted(suite.stats):
            LOG_UI.info("%s: %s", key, suite.stats[key])
        if suite.tags_stats:
            LOG_UI.info("")
            LOG_UI.info("TEST TAGS SUMMARY")
            LOG_UI.info("=================")
            for key in sorted(suite.tags_stats):
                LOG_UI.info("%s: %s", key, suite.tags_stats[key])
        if resolution:
            resolution_header = (TERM_SUPPORT.header_str('Resolver'),
                                 TERM_SUPPORT.header_str('Reference'),
                                 TERM_SUPPORT.header_str('Info'))
            LOG_UI.info("")
            for line in iter_tabular_output(resolution,
                                            header=resolution_header,
                                            strip=True):
                LOG_UI.info(line)
def handle_fetch(config):
    exitcode = exit_codes.AVOCADO_ALL_OK
    # fetch assets from instrumented tests
    for test_file in config.get('assets.fetch.references'):
        if os.path.isfile(test_file) and test_file.endswith('.py'):
            LOG_UI.debug('Fetching assets from %s.', test_file)
            success, fail = fetch_assets(test_file)
            for asset_file in success:
                LOG_UI.debug('  File %s fetched or already on'
                             ' cache.', asset_file)
            for asset_file in fail:
                LOG_UI.error(asset_file)
            if fail:
                exitcode |= exit_codes.AVOCADO_FAIL
        else:
            LOG_UI.warning('No such file or file not supported: %s',
                           test_file)
            exitcode |= exit_codes.AVOCADO_FAIL
    # check if we should ignore the errors
    if config.get('assets.fetch.ignore_errors'):
        return exit_codes.AVOCADO_ALL_OK
    return exitcode
def _save_to_json(matrix, filename, verbose=False):
    result = []
    try:
        type_label_mapping = loader.loader.get_type_label_mapping()
    except RuntimeError:
        # We are in --resolver mode here, so let's create a fake mapping
        # and use the default
        type_label_mapping = {}
    for line in matrix:
        try:
            test_type = type_label_mapping[line[0]]
        except KeyError:
            test_type = line[0]
        if verbose:
            tags = line[2] or {}
            result.append({
                'Type': test_type,
                'Test': line[1],
                'Tags': {k: list(v or {}) for k, v in tags.items()}
            })
        else:
            result.append({'Type': test_type, 'Test': line[1]})
    if filename == '-':
        LOG_UI.debug(json.dumps(result, indent=4))
    else:
        with open(filename, 'w') as fp:
            json.dump(result, fp, indent=4)
def render(self, result, job):
    xunit_enabled = job.config.get('job.run.result.xunit.enabled')
    xunit_output = job.config.get('job.run.result.xunit.output')
    if not (xunit_enabled or xunit_output):
        return
    if not result.tests_total:
        return
    max_test_log_size = job.config.get(
        'job.run.result.xunit.max_test_log_chars')
    job_name = job.config.get('job.run.result.xunit.job_name')
    content = self._render(result, max_test_log_size, job_name)
    if xunit_enabled:
        xunit_path = os.path.join(job.logdir, 'results.xml')
        with open(xunit_path, 'wb') as xunit_file:
            xunit_file.write(content)
    xunit_path = xunit_output
    if xunit_path is not None:
        if xunit_path == '-':
            LOG_UI.debug(content.decode('UTF-8'))
        else:
            with open(xunit_path, 'wb') as xunit_file:
                xunit_file.write(content)
def run(self, args):
    """
    Print libexec path and finish

    :param args: Command line args received from the run subparser.
    """
    LOG_UI.debug(resource_filename("avocado", "libexec"))
def end_test(self, result, state):
    if not self.owns_stdout:
        return
    status = state.get("status", "ERROR")
    if status in self.omit_statuses:
        return
    if status == "TEST_NA":
        status = "SKIP"
    duration = (f" ({state.get('time_elapsed', -1):.2f} s)"
                if status != "SKIP" else "")
    if "name" in state:
        name = state["name"]
        uid = name.str_uid
        name = name.name + name.str_variant
    else:
        name = "<unknown>"
        uid = "?"
    msg = self.get_colored_status(status, state.get("fail_reason", None))
    LOG_UI.debug(
        " (%s/%s) %s: ",
        uid,
        result.tests_total,
        name,
        extra={"skip_newline": True},
    )
    LOG_UI.debug(msg + duration)
def post(self, job):
    statuses = job.config.get('job.output.testlogs.statuses')
    if not statuses:
        return

    try:
        with open(os.path.join(job.logdir, 'results.json')) as json_file:
            results = json.load(json_file)
    except FileNotFoundError:
        return

    logfiles = job.config.get('job.output.testlogs.logfiles')
    for test in results['tests']:
        if test['status'] not in statuses:
            continue
        for logfile in logfiles:
            path = os.path.join(test['logdir'], logfile)
            try:
                with open(path) as log:
                    LOG_UI.info('Log file "%s" content for test "%s" (%s):',
                                logfile, test['id'], test['status'])
                    LOG_UI.debug(log.read())
            except (FileNotFoundError, PermissionError) as error:
                LOG_UI.error('Failure to access log file "%s": %s',
                             path, error)
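# A hedged sketch of the results.json shape that post() above consumes.
# The keys mirror exactly what the loop indexes ('tests', 'status',
# 'logdir', 'id'); the concrete values are invented for illustration.
sample_results = {
    "tests": [
        {"id": "1-passtest.py:PassTest.test",
         "status": "FAIL",
         "logdir": "/tmp/avocado/job-results/job-001/test-results/1"},
    ]
}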
def error(self, message):
    LOG_UI.debug(self.format_help())
    LOG_UI.error("%s: error: %s", self.prog, message)
    if "unrecognized arguments" in message:
        LOG_UI.warning("Perhaps a plugin is missing; run 'avocado"
                       " plugins' to list the installed ones")
    self.exit(exit_codes.AVOCADO_FAIL)
def run(self, config):
    subcommand = config.get('assets_subcommand')
    # we want to let the command caller know about failures
    exitcode = exit_codes.AVOCADO_ALL_OK

    if subcommand == 'fetch':
        # fetch assets from instrumented tests
        for test_file in config.get('assets.fetch.references'):
            if os.path.isfile(test_file) and test_file.endswith('.py'):
                LOG_UI.debug('Fetching assets from %s.', test_file)
                success, fail = fetch_assets(test_file)
                for asset_file in success:
                    LOG_UI.debug('  File %s fetched or already on'
                                 ' cache.', asset_file)
                for asset_file in fail:
                    LOG_UI.error(asset_file)
                if fail:
                    exitcode |= exit_codes.AVOCADO_FAIL
            else:
                LOG_UI.warning('No such file or file not supported: %s',
                               test_file)
                exitcode |= exit_codes.AVOCADO_FAIL
        # check if we should ignore the errors
        if config.get('assets.fetch.ignore_errors'):
            exitcode = exit_codes.AVOCADO_ALL_OK
    return exitcode
def render(self, result, job): xunit_enabled = job.config.get("job.run.result.xunit.enabled") xunit_output = job.config.get("job.run.result.xunit.output") if not (xunit_enabled or xunit_output): return if not result.tests_total: return max_test_log_size = job.config.get( "job.run.result.xunit.max_test_log_chars") job_name = job.config.get("job.run.result.xunit.job_name") content = self._render(result, max_test_log_size, job_name) if xunit_enabled: xunit_path = os.path.join(job.logdir, "results.xml") with open(xunit_path, "wb") as xunit_file: xunit_file.write(content) xunit_path = xunit_output if xunit_path is not None: if xunit_path == "-": LOG_UI.debug(content.decode("UTF-8")) else: with open(xunit_path, "wb") as xunit_file: xunit_file.write(content)
def _display(self, test_matrix, stats, tag_stats):
    header = None
    if self.args.verbose:
        header = (output.TERM_SUPPORT.header_str('Type'),
                  output.TERM_SUPPORT.header_str('Test'),
                  output.TERM_SUPPORT.header_str('Tag(s)'))

    for line in astring.iter_tabular_output(test_matrix, header=header,
                                            strip=True):
        LOG_UI.debug(line)

    if self.args.verbose:
        LOG_UI.info("")
        LOG_UI.info("TEST TYPES SUMMARY")
        LOG_UI.info("==================")
        for key in sorted(stats):
            LOG_UI.info("%s: %s", key.upper(), stats[key])

        if tag_stats:
            LOG_UI.info("")
            LOG_UI.info("TEST TAGS SUMMARY")
            LOG_UI.info("=================")
            for key in sorted(tag_stats):
                LOG_UI.info("%s: %s", key, tag_stats[key])
def run(self, args):
    plugin_types = [
        (dispatcher.CLICmdDispatcher(),
         'Plugins that add new commands (cli.cmd):'),
        (dispatcher.CLIDispatcher(),
         'Plugins that add new options to commands (cli):'),
        (dispatcher.JobPrePostDispatcher(),
         'Plugins that run before/after the execution of jobs (job.prepost):'),
        (dispatcher.ResultDispatcher(),
         'Plugins that generate job result in different formats (result):'),
        (dispatcher.ResultEventsDispatcher(args),
         ('Plugins that generate job result based on job/test events '
          '(result_events):')),
        (dispatcher.VarianterDispatcher(),
         'Plugins that generate test variants (varianter):')
    ]
    for plugins_active, msg in plugin_types:
        LOG_UI.info(msg)
        plugin_matrix = []
        for plugin in sorted(plugins_active, key=lambda x: x.name):
            plugin_matrix.append((plugin.name, plugin.obj.description))

        if not plugin_matrix:
            LOG_UI.debug("(No active plugin)")
        else:
            for line in astring.iter_tabular_output(plugin_matrix):
                LOG_UI.debug(line)
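# Minimal illustration of the tabular helper the plugin listing above
# relies on: avocado.utils.astring.iter_tabular_output yields one aligned
# text row per matrix entry. The two (name, description) rows here are
# made up for the sketch.
from avocado.utils import astring

for line in astring.iter_tabular_output([("human", "Human UI output"),
                                         ("json", "JSON result support")]):
    print(line)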
def end_test(self, result, state): if not self.owns_stdout: return status = state.get("status", "ERROR") if status == "TEST_NA": status = "SKIP" duration = (" (%.2f s)" % state.get('time_elapsed', -1) if status != "SKIP" else "") if self.runner == 'nrunner': if "name" in state: name = state["name"] uid = name.str_uid name = name.name + name.str_variant else: name = "<unknown>" uid = '?' msg = self.get_colored_status(status, state.get("fail_reason", None)) LOG_UI.debug(' (%s/%s) %s: ', uid, result.tests_total, name, extra={"skip_newline": True}) else: msg = self.get_colored_status(status, state.get("fail_reason", None)) LOG_UI.debug(msg + duration)
def test_progress(self, progress=False):
    if not self.owns_stdout:
        return
    if progress:
        color = output.TERM_SUPPORT.PASS
    else:
        color = output.TERM_SUPPORT.PARTIAL
    LOG_UI.debug(color + self.__throbber.render() +
                 output.TERM_SUPPORT.ENDC,
                 extra={"skip_newline": True})
def end_test(self, result, state): if not self.owns_stdout: return status = state.get("status", "ERROR") if status == "TEST_NA": status = "SKIP" duration = (" (%.2f s)" % state.get('time_elapsed', -1) if status != "SKIP" else "") msg = self.get_colored_status(status, state.get("fail_reason", None)) LOG_UI.debug(msg + duration)
def run(self, args):
    """
    Print libexec path and finish

    :param args: Command line args received from the run subparser.
    """
    system_wide = '/usr/libexec/avocado'
    if os.path.isdir(system_wide):
        LOG_UI.debug(system_wide)
    else:
        LOG_UI.debug(resource_filename("avocado", "libexec"))
def run(self, config):
    err = None
    if config.get('tree') and config.get('varianter_debug'):
        err = "Option --tree is incompatible with --debug."
    elif not config.get('tree') and config.get('inherit'):
        err = "Option --inherit can only be used with --tree"
    if err:
        LOG_UI.error(err)
        sys.exit(exit_codes.AVOCADO_FAIL)
    varianter = config.get('avocado_variants')
    try:
        varianter.parse(config)
    except (IOError, ValueError) as details:
        LOG_UI.error("Unable to parse varianter: %s", details)
        sys.exit(exit_codes.AVOCADO_FAIL)
    use_utf8 = settings.get_value("runner.output", "utf8",
                                  key_type=bool, default=None)
    summary = config.get('summary') or 0
    variants = config.get('variants') or 0

    # Parse obsolete options (unsafe to combine them with new args)
    if config.get('tree'):
        variants = 0
        summary += 1
        if config.get('contents'):
            summary += 1
        if config.get('inherit'):
            summary += 2
    else:
        if config.get('contents'):
            variants += 2

    # Export the serialized avocado_variants
    if config.get('json_variants_dump') is not None:
        try:
            with open(config.get('json_variants_dump'), 'w') as variants_file:
                json.dump(config.get('avocado_variants').dump(),
                          variants_file)
        except IOError:
            LOG_UI.error("Cannot write %s",
                         config.get('json_variants_dump'))
            sys.exit(exit_codes.AVOCADO_FAIL)

    # Produce the output
    lines = config.get('avocado_variants').to_str(summary=summary,
                                                  variants=variants,
                                                  use_utf8=use_utf8)
    for line in lines.splitlines():
        LOG_UI.debug(line)
    sys.exit(exit_codes.AVOCADO_ALL_OK)
def run(self, config):
    """
    Print libexec path and finish

    :param config: job configuration
    """
    system_wide = '/usr/libexec/avocado'
    if os.path.isdir(system_wide):
        LOG_UI.debug(system_wide)
    else:
        LOG_UI.debug(resource_filename("avocado", "libexec"))
def send_request(self, req):
    LOG_UI.debug("beaker: %s %s ...", req.method, req.full_url)
    try:
        res = urllib.request.urlopen(req)  # nosec
        return res
    except urllib.error.URLError as err:
        LOG_UI.info("beaker: %s %s failed: %s",
                    req.method, req.full_url, err)
        return None
    except Exception as err:  # should not happen
        LOG_UI.info("beaker: Oops: %s", err)
        return None
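# Hypothetical caller for send_request above, built only on the stdlib
# urllib.request.Request API; the Beaker URL and the `client` instance
# holding the method are invented for this sketch.
import urllib.request

req = urllib.request.Request("https://beaker.example.com/jobs/1234",
                             method="GET")
res = client.send_request(req)  # the response object, or None on failure
if res is not None:
    body = res.read()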
def start_test(self, result, state): if not self.owns_stdout: return if "name" in state: name = state["name"] uid = name.str_uid name = name.name + name.str_variant else: name = "<unknown>" uid = '?' LOG_UI.debug(' (%s/%s) %s: ', uid, result.tests_total, name, extra={"skip_newline": True})
def run(self, config):
    tree = config.get('variants.tree')
    summary = config.get('variants.summary')
    variants = config.get('variants.variants')
    contents = config.get('variants.contents')
    inherit = config.get('variants.inherit')

    err = None
    if tree and config.get('variants.debug'):
        err = "Option --tree is incompatible with --debug."
    elif not tree and inherit:
        err = "Option --inherit can only be used with --tree"
    if err:
        LOG_UI.error(err)
        sys.exit(exit_codes.AVOCADO_FAIL)
    varianter = config.get('avocado_variants')
    try:
        varianter.parse(config)
    except (IOError, ValueError) as details:
        LOG_UI.error("Unable to parse varianter: %s", details)
        sys.exit(exit_codes.AVOCADO_FAIL)
    use_utf8 = config.get("runner.output.utf8")

    # Parse obsolete options (unsafe to combine them with new args)
    if tree:
        variants = 0
        summary += 1
        if contents:
            summary += 1
        if inherit:
            summary += 2
    else:
        if contents:
            variants += 2

    json_variants_dump = config.get('variants.json_variants_dump')
    # Export the serialized avocado_variants
    if json_variants_dump is not None:
        try:
            with open(json_variants_dump, 'w') as variants_file:
                json.dump(config.get('avocado_variants').dump(),
                          variants_file)
        except IOError:
            LOG_UI.error("Cannot write %s", json_variants_dump)
            sys.exit(exit_codes.AVOCADO_FAIL)

    # Produce the output
    lines = config.get('avocado_variants').to_str(summary=summary,
                                                  variants=variants,
                                                  use_utf8=use_utf8)
    for line in lines.splitlines():
        LOG_UI.debug(line)
    sys.exit(exit_codes.AVOCADO_ALL_OK)
def _display(self, suite, matrix):
    header = None
    verbose = suite.config.get('core.verbose')
    if verbose:
        header = (TERM_SUPPORT.header_str('Type'),
                  TERM_SUPPORT.header_str('Test'),
                  TERM_SUPPORT.header_str('Tag(s)'))
    for line in iter_tabular_output(matrix, header=header, strip=True):
        LOG_UI.debug(line)
    if verbose:
        if suite.resolutions:
            resolution_header = (TERM_SUPPORT.header_str('Resolver'),
                                 TERM_SUPPORT.header_str('Reference'),
                                 TERM_SUPPORT.header_str('Info'))
            LOG_UI.info("")
            mapping = {
                ReferenceResolutionResult.SUCCESS:
                    TERM_SUPPORT.healthy_str,
                ReferenceResolutionResult.NOTFOUND:
                    TERM_SUPPORT.fail_header_str,
                ReferenceResolutionResult.ERROR:
                    TERM_SUPPORT.fail_header_str
            }
            resolution_matrix = []
            for r in suite.resolutions:
                decorator = mapping.get(r.result,
                                        TERM_SUPPORT.warn_header_str)
                if r.result == ReferenceResolutionResult.SUCCESS:
                    continue
                resolution_matrix.append(
                    (decorator(r.origin), r.reference, r.info or ''))
            for line in iter_tabular_output(resolution_matrix,
                                            header=resolution_header,
                                            strip=True):
                LOG_UI.info(line)

        LOG_UI.info("")
        LOG_UI.info("TEST TYPES SUMMARY")
        LOG_UI.info("==================")
        for key in sorted(suite.stats):
            LOG_UI.info("%s: %s", key, suite.stats[key])

        if suite.tags_stats:
            LOG_UI.info("")
            LOG_UI.info("TEST TAGS SUMMARY")
            LOG_UI.info("=================")
            for key in sorted(suite.tags_stats):
                LOG_UI.info("%s: %s", key, suite.tags_stats[key])
def test_progress(self, progress=False):
    if not self.owns_stdout:
        return
    if progress:
        color = output.TERM_SUPPORT.PASS
    else:
        color = output.TERM_SUPPORT.PARTIAL
    if self.runner == 'runner':
        LOG_UI.debug('%s%s%s', color, self.__throbber.render(),
                     output.TERM_SUPPORT.ENDC,
                     extra={"skip_newline": True})
def _display(self, test_matrix, stats):
    header = None
    if self.args.verbose:
        header = (output.TERM_SUPPORT.header_str('Type'),
                  output.TERM_SUPPORT.header_str('Test'))

    for line in astring.iter_tabular_output(test_matrix, header=header):
        LOG_UI.debug(line)

    if self.args.verbose:
        LOG_UI.debug("")
        for key in sorted(stats):
            LOG_UI.info("%s: %s", key.upper(), stats[key])
def end_test(self, result, state): if not self.owns_stdout: return status = state.get("status", "ERROR") if status == "TEST_NA": status = "SKIP" duration = (" (%.2f s)" % state.get('time_elapsed', -1) if status != "SKIP" else "") LOG_UI.debug(output.TERM_SUPPORT.MOVE_BACK + self.output_mapping[status] + status + output.TERM_SUPPORT.ENDC + duration)
def start_test(self, result, state): if not self.owns_stdout: return if "STARTED" in self.omit_statuses: return if "name" in state: name = state["name"] uid = name.str_uid name = name.name + name.str_variant else: name = "<unknown>" uid = '?' LOG_UI.debug(' (%s/%s) %s: STARTED', uid, result.tests_total, name)
def run(self, args):
    err = None
    if args.tree and args.varianter_debug:
        err = "Option --tree is incompatible with --debug."
    elif not args.tree and args.inherit:
        err = "Option --inherit can only be used with --tree"
    if err:
        LOG_UI.error(err)
        sys.exit(exit_codes.AVOCADO_FAIL)
    varianter = args.avocado_variants
    try:
        varianter.parse(args)
    except (IOError, ValueError) as details:
        LOG_UI.error("Unable to parse varianter: %s", details)
        sys.exit(exit_codes.AVOCADO_FAIL)
    use_utf8 = settings.get_value("runner.output", "utf8",
                                  key_type=bool, default=None)
    summary = args.summary or 0
    variants = args.variants or 0

    # Parse obsolete options (unsafe to combine them with new args)
    if args.tree:
        variants = 0
        summary += 1
        if args.contents:
            summary += 1
        if args.inherit:
            summary += 2
    else:
        if args.contents:
            variants += 2

    # Export the serialized avocado_variants
    if args.json_variants_dump is not None:
        try:
            with open(args.json_variants_dump, 'w') as variants_file:
                json.dump(args.avocado_variants.dump(), variants_file)
        except IOError:
            LOG_UI.error("Cannot write %s", args.json_variants_dump)
            sys.exit(exit_codes.AVOCADO_FAIL)

    # Produce the output
    lines = args.avocado_variants.to_str(summary=summary,
                                         variants=variants,
                                         use_utf8=use_utf8)
    for line in lines.splitlines():
        LOG_UI.debug(line)
    sys.exit(exit_codes.AVOCADO_ALL_OK)
def _display(self, suite, matrix):
    header = None
    verbose = suite.config.get('core.verbose')
    if verbose:
        header = (TERM_SUPPORT.header_str('Type'),
                  TERM_SUPPORT.header_str('Test'),
                  TERM_SUPPORT.header_str('Tag(s)'))

    # Any kind of color, string formatting and term-specific handling
    # should be applied only during the output/display phase, so this
    # seems to be a better place for it:
    matrix = self._prepare_matrix_for_display(matrix, verbose)

    for line in iter_tabular_output(matrix, header=header, strip=True):
        LOG_UI.debug(line)
    self._display_extra(suite, verbose)
def run(self, config): subcommand = config.get("vmimage_subcommand") if subcommand == 'list': images = list_downloaded_images() display_images_list(images) elif subcommand == 'get': name = config.get('vmimage.get.distro') version = config.get('vmimage.get.version') arch = config.get('vmimage.get.arch') try: image = download_image(name, version, arch) except AttributeError: LOG_UI.debug("The requested image could not be downloaded") return exit_codes.AVOCADO_FAIL LOG_UI.debug("The image was downloaded:") display_images_list([image]) return exit_codes.AVOCADO_ALL_OK
def display_images_list(images):
    """
    Displays a table with information about the images

    :param images: list with each image's parameters
    :type images: list of dicts
    """
    image_matrix = [[image['name'], image['version'], image['arch'],
                     image['file']] for image in images]
    header = (output.TERM_SUPPORT.header_str('Provider'),
              output.TERM_SUPPORT.header_str('Version'),
              output.TERM_SUPPORT.header_str('Architecture'),
              output.TERM_SUPPORT.header_str('File'))
    for line in astring.iter_tabular_output(image_matrix,
                                            header=header,
                                            strip=True):
        LOG_UI.debug(line)
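# Usage sketch for display_images_list above: each dict carries the
# 'name', 'version', 'arch' and 'file' keys that the list comprehension
# indexes; the sample values are made up.
sample_images = [
    {'name': 'Fedora', 'version': '35', 'arch': 'x86_64',
     'file': '/home/user/avocado/data/cache/fedora-35.qcow2'},
]
display_images_list(sample_images)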
def _print_job_tests(tests):
    test_matrix = []
    date_fmt = "%Y/%m/%d %H:%M:%S"
    for test in tests:
        status = test.get('status')
        decorator = output.TEST_STATUS_DECORATOR_MAPPING.get(status)
        end = datetime.fromtimestamp(test.get('end'))
        test_matrix.append((test.get('id'),
                            end.strftime(date_fmt),
                            "%5f" % float(test.get('time')),
                            decorator(status, '')))
    header = (output.TERM_SUPPORT.header_str('Test ID'),
              output.TERM_SUPPORT.header_str('End Time'),
              output.TERM_SUPPORT.header_str('Run Time'),
              output.TERM_SUPPORT.header_str('Status'))
    for line in astring.iter_tabular_output(test_matrix,
                                            header=header,
                                            strip=True):
        LOG_UI.debug(line)
def render(self, result, job):
    if not (hasattr(job.args, 'xunit_job_result') or
            hasattr(job.args, 'xunit_output')):
        return
    if not result.tests_total:
        return
    content = self._render(result)
    if getattr(job.args, 'xunit_job_result', 'off') == 'on':
        xunit_path = os.path.join(job.logdir, 'results.xml')
        with open(xunit_path, 'w') as xunit_file:
            xunit_file.write(content)
    xunit_path = getattr(job.args, 'xunit_output', None)
    if xunit_path is not None:
        if xunit_path == '-':
            LOG_UI.debug(content)
        else:
            with open(xunit_path, 'w') as xunit_file:
                xunit_file.write(content)
def run(self, args):
    if args.distro_def_create:
        if not (args.distro_def_name and args.distro_def_version and
                args.distro_def_arch and args.distro_def_type and
                args.distro_def_path):
            LOG_UI.error('Required arguments: name, version, arch, type '
                         'and path')
            sys.exit(exit_codes.AVOCADO_FAIL)

        output_file_name = self.get_output_file_name(args)
        if os.path.exists(output_file_name):
            LOG_UI.error('Output file "%s" already exists, will not '
                         'overwrite it', output_file_name)
        else:
            LOG_UI.debug("Loading distro information from tree... "
                         "Please wait...")
            distro = load_from_tree(args.distro_def_name,
                                    args.distro_def_version,
                                    args.distro_def_release,
                                    args.distro_def_arch,
                                    args.distro_def_type,
                                    args.distro_def_path)
            save_distro(distro, output_file_name)
            LOG_UI.debug('Distro information saved to "%s"',
                         output_file_name)
    else:
        detected = utils_distro.detect()
        LOG_UI.debug('Detected distribution: %s (%s) version %s release '
                     '%s', detected.name, detected.arch,
                     detected.version, detected.release)
def render(self, result, job):
    if not (hasattr(job.args, 'xunit_job_result') or
            hasattr(job.args, 'xunit_output')):
        return
    if not result.tests_total:
        return
    max_test_log_size = getattr(job.args, 'xunit_max_test_log_chars', None)
    job_name = getattr(job.args, 'xunit_job_name', None)
    content = self._render(result, max_test_log_size, job_name)
    if getattr(job.args, 'xunit_job_result', 'off') == 'on':
        xunit_path = os.path.join(job.logdir, 'results.xml')
        with open(xunit_path, 'wb') as xunit_file:
            xunit_file.write(content)
    xunit_path = getattr(job.args, 'xunit_output', None)
    if xunit_path is not None:
        if xunit_path == '-':
            LOG_UI.debug(content.decode('UTF-8'))
        else:
            with open(xunit_path, 'wb') as xunit_file:
                xunit_file.write(content)
def run(self, args):
    """
    Print libexec path and finish

    :param args: Command line args received from the run subparser.
    """
    if 'VIRTUAL_ENV' in os.environ:
        LOG_UI.debug('libexec')
    elif os.path.exists('/usr/libexec/avocado'):
        LOG_UI.debug('/usr/libexec/avocado')
    elif os.path.exists('/usr/lib/avocado'):
        LOG_UI.debug('/usr/lib/avocado')
    else:
        for path in os.environ.get('PATH').split(':'):
            if (os.path.exists(os.path.join(path, 'avocado')) and
                    os.path.exists(os.path.join(os.path.dirname(path),
                                                'libexec'))):
                LOG_UI.debug(os.path.join(os.path.dirname(path),
                                          'libexec'))
                break
        else:
            LOG_UI.error("Can't locate avocado libexec path")
            sys.exit(exit_codes.AVOCADO_FAIL)
    return sys.exit(exit_codes.AVOCADO_ALL_OK)
def run(self, args): LOG_UI.info("Config files read (in order, '*' means the file exists " "and had been read):") for cfg_path in settings.all_config_paths: if cfg_path in settings.config_paths: LOG_UI.debug(' * %s', cfg_path) else: LOG_UI.debug(' %s', cfg_path) LOG_UI.debug("") if not args.datadir: blength = 0 for section in settings.config.sections(): for value in settings.config.items(section): clength = len('%s.%s' % (section, value[0])) if clength > blength: blength = clength format_str = " %-" + str(blength) + "s %s" LOG_UI.debug(format_str, 'Section.Key', 'Value') for section in settings.config.sections(): for value in settings.config.items(section): config_key = ".".join((section, value[0])) LOG_UI.debug(format_str, config_key, value[1]) else: LOG_UI.debug("Avocado replaces config dirs that can't be accessed") LOG_UI.debug("with sensible defaults. Please edit your local config") LOG_UI.debug("file to customize values") LOG_UI.debug('') LOG_UI.info('Avocado Data Directories:') LOG_UI.debug(' base %s', data_dir.get_base_dir()) LOG_UI.debug(' tests %s', data_dir.get_test_dir()) LOG_UI.debug(' data %s', data_dir.get_data_dir()) LOG_UI.debug(' logs %s', data_dir.get_logs_dir()) LOG_UI.debug(' cache %s', ", ".join(data_dir.get_cache_dirs()))
def run(self, args):
    LOG_UI.info('Config files read (in order):')
    for cfg_path in settings.config_paths:
        LOG_UI.debug('    %s' % cfg_path)
    if settings.config_paths_failed:
        LOG_UI.error('\nConfig files that failed to read:')
        for cfg_path in settings.config_paths_failed:
            LOG_UI.error('    %s' % cfg_path)
    LOG_UI.debug("")
    if not args.datadir:
        blength = 0
        for section in settings.config.sections():
            for value in settings.config.items(section):
                clength = len('%s.%s' % (section, value[0]))
                if clength > blength:
                    blength = clength

        format_str = "    %-" + str(blength) + "s %s"

        LOG_UI.debug(format_str, 'Section.Key', 'Value')
        for section in settings.config.sections():
            for value in settings.config.items(section):
                config_key = ".".join((section, value[0]))
                LOG_UI.debug(format_str, config_key, value[1])
    else:
        LOG_UI.debug("Avocado replaces config dirs that can't be accessed")
        LOG_UI.debug("with sensible defaults. Please edit your local config")
        LOG_UI.debug("file to customize values")

    LOG_UI.debug('')
    LOG_UI.info('Avocado Data Directories:')
    LOG_UI.debug('    base  ' + data_dir.get_base_dir())
    LOG_UI.debug('    tests ' + data_dir.get_test_dir())
    LOG_UI.debug('    data  ' + data_dir.get_data_dir())
    LOG_UI.debug('    logs  ' + data_dir.get_logs_dir())
    LOG_UI.debug('    cache ' + ", ".join(data_dir.get_cache_dirs()))
def run(self, args):

    def _get_name(test):
        return str(test['id'])

    def _get_name_no_id(test):
        return str(test['id']).split('-', 1)[1]

    job1_dir, job1_id = self._setup_job(args.jobids[0])
    job2_dir, job2_id = self._setup_job(args.jobids[1])

    job1_data = self._get_job_data(job1_dir)
    job2_data = self._get_job_data(job2_dir)

    report_header = 'Avocado Job Report\n'
    job1_results = [report_header]
    job2_results = [report_header]

    if 'cmdline' in args.diff_filter:
        cmdline1 = self._get_command_line(job1_dir)
        cmdline2 = self._get_command_line(job2_dir)

        if str(cmdline1) != str(cmdline2):
            command_line_header = ['\n', '# COMMAND LINE\n']
            job1_results.extend(command_line_header)
            job1_results.append(cmdline1)
            job2_results.extend(command_line_header)
            job2_results.append(cmdline2)

    if 'time' in args.diff_filter:
        time1 = '%.2f s\n' % job1_data['time']
        time2 = '%.2f s\n' % job2_data['time']

        if str(time1) != str(time2):
            total_time_header = ['\n', '# TOTAL TIME\n']
            job1_results.extend(total_time_header)
            job1_results.append(time1)
            job2_results.extend(total_time_header)
            job2_results.append(time2)

    if 'variants' in args.diff_filter:
        variants1 = self._get_variants(job1_dir)
        variants2 = self._get_variants(job2_dir)

        if str(variants1) != str(variants2):
            variants_header = ['\n', '# VARIANTS\n']
            job1_results.extend(variants_header)
            job1_results.extend(variants1)
            job2_results.extend(variants_header)
            job2_results.extend(variants2)

    if 'results' in args.diff_filter:
        results1 = []
        if args.diff_strip_id:
            get_name = _get_name_no_id
        else:
            get_name = _get_name
        for test in job1_data['tests']:
            test_result = '%s: %s\n' % (get_name(test),
                                        str(test['status']))
            results1.append(test_result)
        results2 = []
        for test in job2_data['tests']:
            test_result = '%s: %s\n' % (get_name(test),
                                        str(test['status']))
            results2.append(test_result)

        if str(results1) != str(results2):
            test_results_header = ['\n', '# TEST RESULTS\n']
            job1_results.extend(test_results_header)
            job1_results.extend(results1)
            job2_results.extend(test_results_header)
            job2_results.extend(results2)

    if 'config' in args.diff_filter:
        config1 = self._get_config(job1_dir)
        config2 = self._get_config(job2_dir)

        if str(config1) != str(config2):
            config_header = ['\n', '# SETTINGS\n']
            job1_results.extend(config_header)
            job1_results.extend(config1)
            job2_results.extend(config_header)
            job2_results.extend(config2)

    if 'sysinfo' in args.diff_filter:
        sysinfo_pre1 = self._get_sysinfo(job1_dir, 'pre')
        sysinfo_pre2 = self._get_sysinfo(job2_dir, 'pre')

        if str(sysinfo_pre1) != str(sysinfo_pre2):
            sysinfo_header_pre = ['\n', '# SYSINFO PRE\n']
            job1_results.extend(sysinfo_header_pre)
            job1_results.extend(sysinfo_pre1)
            job2_results.extend(sysinfo_header_pre)
            job2_results.extend(sysinfo_pre2)

        sysinfo_post1 = self._get_sysinfo(job1_dir, 'post')
        sysinfo_post2 = self._get_sysinfo(job2_dir, 'post')

        if str(sysinfo_post1) != str(sysinfo_post2):
            sysinfo_header_post = ['\n', '# SYSINFO POST\n']
            job1_results.extend(sysinfo_header_post)
            job1_results.extend(sysinfo_post1)
            job2_results.extend(sysinfo_header_post)
            job2_results.extend(sysinfo_post2)

    if getattr(args, 'create_reports', False):
        self.std_diff_output = False
        prefix = 'avocado_diff_%s_' % job1_id[:7]
        tmp_file1 = tempfile.NamedTemporaryFile(mode='w', prefix=prefix,
                                                suffix='.txt', delete=False)
        tmp_file1.writelines(job1_results)
        tmp_file1.close()

        prefix = 'avocado_diff_%s_' % job2_id[:7]
        tmp_file2 = tempfile.NamedTemporaryFile(mode='w', prefix=prefix,
                                                suffix='.txt', delete=False)
        tmp_file2.writelines(job2_results)
        tmp_file2.close()

        LOG_UI.info('%s %s', tmp_file1.name, tmp_file2.name)

    if (getattr(args, 'open_browser', False) and
            getattr(args, 'html', None) is None):
        prefix = 'avocado_diff_%s_%s_' % (job1_id[:7], job2_id[:7])
        tmp_file = tempfile.NamedTemporaryFile(mode='w', prefix=prefix,
                                               suffix='.html', delete=False)
        setattr(args, 'html', tmp_file.name)

    if getattr(args, 'html', None) is not None:
        self.std_diff_output = False
        try:
            html_diff = HtmlDiff()
            html_diff._legend = """
                <table class="diff" summary="Legends">
                <tr> <td>
                    <table border="" summary="Colors">
                    <tr><th> Colors </th> </tr>
                    <tr><td class="diff_add"> Added </td></tr>
                    <tr><td class="diff_chg">Changed</td> </tr>
                    <tr><td class="diff_sub">Deleted</td> </tr>
                    </table></td>
                <td>
                    <table border="" summary="Links">
                    <tr><th colspan="2"> Links </th> </tr>
                    <tr><td>(f)irst change</td> </tr>
                    <tr><td>(n)ext change</td> </tr>
                    <tr><td>(t)op</td> </tr>
                    </table></td> </tr>
                </table>"""

            # the result lists already hold str items, so no decoding or
            # encoding is needed when building and writing the HTML diff
            job_diff_html = html_diff.make_file(job1_results,
                                                job2_results,
                                                fromdesc=job1_id,
                                                todesc=job2_id)

            with open(args.html, 'w') as html_file:
                html_file.write(job_diff_html)

            LOG_UI.info(args.html)
        except IOError as exception:
            LOG_UI.error(exception)
            sys.exit(exit_codes.AVOCADO_FAIL)

    if getattr(args, 'open_browser', False):
        setsid = getattr(os, 'setsid', None)
        if not setsid:
            setsid = getattr(os, 'setpgrp', None)
        with open(os.devnull, "r+") as inout:
            cmd = ['xdg-open', args.html]
            subprocess.Popen(cmd, close_fds=True, stdin=inout,
                             stdout=inout, stderr=inout,
                             preexec_fn=setsid)

    if self.std_diff_output:
        if self.term.enabled:
            for line in self._cdiff(unified_diff(job1_results,
                                                 job2_results,
                                                 fromfile=job1_id,
                                                 tofile=job2_id)):
                LOG_UI.debug(line.strip())
        else:
            for line in unified_diff(job1_results,
                                     job2_results,
                                     fromfile=job1_id,
                                     tofile=job2_id):
                LOG_UI.debug(line.strip())
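# Standalone sketch of the plain-text diff path at the end of run() above,
# using only stdlib difflib; the two result lists stand in for
# job1_results/job2_results and the job ids are invented.
from difflib import unified_diff

left = ['Avocado Job Report\n', '\n', '# TEST RESULTS\n', 'passtest: PASS\n']
right = ['Avocado Job Report\n', '\n', '# TEST RESULTS\n', 'passtest: FAIL\n']
for line in unified_diff(left, right, fromfile='job-1', tofile='job-2'):
    print(line.strip())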