Exemple #1
0
    def run(self, args):
        """
        Create a distro definition file, or detect the current distribution.

        :param args: command line arguments received from the subparser.
        """
        if args.distro_def_create:
            # All identifying fields are mandatory when creating a definition.
            if not (args.distro_def_name and args.distro_def_version and
                    args.distro_def_arch and args.distro_def_type and
                    args.distro_def_path):
                LOG_UI.error('Required arguments: name, version, arch, type '
                             'and path')
                sys.exit(exit_codes.AVOCADO_FAIL)

            output_file_name = self.get_output_file_name(args)
            if os.path.exists(output_file_name):
                # BUG FIX: the message and its argument were packed into a
                # tuple and the tuple itself was logged; pass them as
                # separate arguments so %s is actually substituted.
                LOG_UI.error('Output file "%s" already exists, will not '
                             'overwrite it', output_file_name)
            else:
                LOG_UI.debug("Loading distro information from tree... "
                             "Please wait...")
                distro = load_from_tree(args.distro_def_name,
                                        args.distro_def_version,
                                        args.distro_def_release,
                                        args.distro_def_arch,
                                        args.distro_def_type,
                                        args.distro_def_path)
                save_distro(distro, output_file_name)
                LOG_UI.debug('Distro information saved to "%s"',
                             output_file_name)
        else:
            detected = utils_distro.detect()
            LOG_UI.debug('Detected distribution: %s (%s) version %s release '
                         '%s', detected.name, detected.arch, detected.version,
                         detected.release)
Exemple #2
0
    def initialize(self, args):
        """Load the CIT parameter file, if given, and pre-compute variants."""
        self.variants = None

        param_path = getattr(args, "cit_parameter_file", None)
        if param_path is None:
            # Plugin not requested for this invocation.
            return

        param_path = os.path.expanduser(param_path)
        if not os.access(param_path, os.R_OK):
            LOG_UI.error("parameter file '%s' could not be found or "
                         "is not readable", param_path)
            self.error_exit(args)

        config = configparser.ConfigParser()
        try:
            config.read(param_path)
        except Exception as details:
            LOG_UI.error("Cannot parse parameter file: %s", details)
            self.error_exit(args)

        # Each option in the [parameters] section holds a ", "-separated
        # list of possible values.
        parameters = []
        for key, value in config.items('parameters'):
            parameters.append((key, value.split(', ')))
        combinator = Cit(parameters, args.cit_order_of_combinations)
        self.headers, self.variants = combinator.combine()
Exemple #3
0
    def run(self, args):
        """
        Print libexec path and finish

        :param args: Command line args received from the run subparser.
        """
        libexec_path = resource_filename("avocado", "libexec")
        LOG_UI.debug(libexec_path)
Exemple #4
0
 def test_progress(self, progress=False):
     """Render one throbber tick, green on progress, yellow otherwise."""
     if not self.owns_stdout:
         return
     color = (output.TERM_SUPPORT.PASS if progress
              else output.TERM_SUPPORT.PARTIAL)
     frame = color + self.__throbber.render() + output.TERM_SUPPORT.ENDC
     LOG_UI.debug(frame, extra={"skip_newline": True})
Exemple #5
0
 def post_tests(self, job):
     """Print the per-status results summary for a job that passed."""
     if not self.owns_stdout:
         return
     if job.status != 'PASS':
         return
     res = job.result
     LOG_UI.info("RESULTS    : PASS %d | ERROR %d | FAIL %d | SKIP %d | "
                 "WARN %d | INTERRUPT %s | CANCEL %s", res.passed,
                 res.errors, res.failed, res.skipped, res.warned,
                 res.interrupted, res.cancelled)
    def run(self, args):
        """
        Print libexec path and finish

        :param args: Command line args received from the run subparser.
        """
        system_wide = '/usr/libexec/avocado'
        if not os.path.isdir(system_wide):
            # Fall back to the in-tree/installed package location.
            LOG_UI.debug(resource_filename("avocado", "libexec"))
        else:
            LOG_UI.debug(system_wide)
Exemple #7
0
 def _get_test_suite(self, paths):
     """Discover tests from *paths*, honoring the verbosity setting."""
     mode = (loader.DiscoverMode.ALL if self.args.verbose
             else loader.DiscoverMode.AVAILABLE)
     try:
         return loader.loader.discover(paths, which_tests=mode)
     except loader.LoaderUnhandledReferenceError as details:
         LOG_UI.error(str(details))
         sys.exit(exit_codes.AVOCADO_FAIL)
Exemple #8
0
 def end_test(self, result, state):
     """Log the colored final status line for a finished test."""
     if not self.owns_stdout:
         return
     status = state.get("status", "ERROR")
     if status == "TEST_NA":
         status = "SKIP"
     if status == "SKIP":
         duration = ""
     else:
         duration = " (%.2f s)" % state.get('time_elapsed', -1)
     colored = self.get_colored_status(status, state.get("fail_reason", None))
     LOG_UI.debug(colored + duration)
Exemple #9
0
 def start_test(self, result, state):
     """Log the "(uid/total) name:" prefix as a test starts."""
     if not self.owns_stdout:
         return
     if "name" not in state:
         uid, display = '?', "<unknown>"
     else:
         test_id = state["name"]
         uid = test_id.str_uid
         display = test_id.name + test_id.str_variant
     LOG_UI.debug(' (%s/%s) %s:  ', uid, result.tests_total, display,
                  extra={"skip_newline": True})
Exemple #10
0
 def end_test(self, result, state):
     """Move the cursor back and print the mapped status plus duration."""
     if not self.owns_stdout:
         return
     status = state.get("status", "ERROR")
     if status == "TEST_NA":
         status = "SKIP"
     if status == "SKIP":
         duration = ""
     else:
         duration = " (%.2f s)" % state.get('time_elapsed', -1)
     pieces = [output.TERM_SUPPORT.MOVE_BACK,
               self.output_mapping[status],
               status,
               output.TERM_SUPPORT.ENDC,
               duration]
     LOG_UI.debug("".join(pieces))
    def initialize(self, args):
        """Restore serialized variants from a JSON file, when requested."""
        load_variants = getattr(args, "json_variants_load", None)
        if load_variants is None:
            self.variants = _NO_VARIANTS
            return
        try:
            with open(load_variants, 'r') as var_file:
                state = json.load(var_file)
            self.variants = varianter.Varianter(state=state)
        except IOError:
            LOG_UI.error("JSON serialized file '%s' could not be found or "
                         "is not readable", load_variants)
            # A failure while running a job is a job failure; otherwise a
            # plain command failure.
            code = (exit_codes.AVOCADO_JOB_FAIL
                    if args.subcommand == 'run'
                    else exit_codes.AVOCADO_FAIL)
            sys.exit(code)
    def mail(self, job):
        """
        Send an email notification about the finished job.

        :param job: the job whose unique id and status are reported.
        """
        # build proper subject based on job status
        subject = '%s Job %s - Status: %s' % (self.subject,
                                              job.unique_id,
                                              job.status)
        msg = MIMEText(subject)
        msg['Subject'] = self.subject
        msg['From'] = self.sender
        msg['To'] = self.rcpt

        # So many possible failures, let's just tell the user about it.
        # BUG FIX: the bare "except:" also swallowed SystemExit and
        # KeyboardInterrupt; catch only what SMTP delivery can raise
        # (protocol errors and socket/OS errors).
        try:
            smtp = smtplib.SMTP(self.server)
            smtp.sendmail(self.sender, [self.rcpt], msg.as_string())
            smtp.quit()
        except (smtplib.SMTPException, OSError):
            LOG_UI.error("Failure to send email notification: "
                         "please check your mail configuration")
Exemple #13
0
    def _check_required_args(args, enable_arg, required_args):
        """
        :return: True when enable_arg enabled and all required args are set
        :raise sys.exit: When missing required argument.
        """
        if (not hasattr(args, enable_arg) or
                not getattr(args, enable_arg)):
            return False
        missing = []
        for arg in required_args:
            if not getattr(args, arg):
                missing.append(arg)
        if missing:
            LOG_UI.error("Use of %s requires %s arguments to be set. Please "
                         "set %s.", enable_arg, ', '.join(required_args),
                         ', '.join(missing))

            return sys.exit(exit_codes.AVOCADO_FAIL)
        return True
Exemple #14
0
    def run(self, args):
        """
        Run test modules or simple tests.

        :param args: Command line args received from the run subparser.
        :return: the job's run result (exit-code-like value from
                 ``job_instance.run()``).
        """
        # Propagate the output-check record mode so spawned processes record
        # their output accordingly.
        if 'output_check_record' in args:
            process.OUTPUT_CHECK_RECORD_MODE = getattr(args,
                                                       'output_check_record',
                                                       None)

        # A unique job id must be a full 40-character hexadecimal string;
        # int(..., 16) validates the digits, the length check the size.
        if args.unique_job_id is not None:
            try:
                int(args.unique_job_id, 16)
                if len(args.unique_job_id) != 40:
                    raise ValueError
            except ValueError:
                LOG_UI.error('Unique Job ID needs to be a 40 digit hex number')
                sys.exit(exit_codes.AVOCADO_FAIL)
        # Normalize human-friendly timeouts (e.g. "2h") into seconds.
        try:
            args.job_timeout = time_to_seconds(args.job_timeout)
        except ValueError as detail:
            LOG_UI.error(detail.args[0])
            sys.exit(exit_codes.AVOCADO_FAIL)
        with job.Job(args) as job_instance:
            pre_post_dispatcher = JobPrePostDispatcher()
            try:
                # Run JobPre plugins
                output.log_plugin_failures(pre_post_dispatcher.load_failures)
                pre_post_dispatcher.map_method('pre', job_instance)

                job_run = job_instance.run()
            finally:
                # Run JobPost plugins -- always, even when the run raised
                pre_post_dispatcher.map_method('post', job_instance)

            result_dispatcher = ResultDispatcher()
            if result_dispatcher.extensions:
                result_dispatcher.map_method('render',
                                             job_instance.result,
                                             job_instance)
        return job_run
Exemple #15
0
    def render(self, result, job):
        """
        Write the xunit report to the job's logdir and/or a custom path.

        :param result: job result holding the test data to serialize.
        :param job: the job, carrying the xunit options in its args.
        """
        if not (hasattr(job.args, 'xunit_job_result') or
                hasattr(job.args, 'xunit_output')):
            return

        if not result.tests_total:
            return

        content = self._render(result)
        if getattr(job.args, 'xunit_job_result', 'off') == 'on':
            xunit_path = os.path.join(job.logdir, 'results.xml')
            with open(xunit_path, 'w') as xunit_file:
                xunit_file.write(content)

        # BUG FIX: the fallback used to be the *string* 'None', which passes
        # the "is not None" check, so a missing xunit_output attribute wrote
        # a file literally named "None".  Use the None singleton instead.
        xunit_path = getattr(job.args, 'xunit_output', None)
        if xunit_path is not None:
            if xunit_path == '-':
                LOG_UI.debug(content)
            else:
                with open(xunit_path, 'w') as xunit_file:
                    xunit_file.write(content)
Exemple #16
0
    def _run_scripts(self, kind, scripts_dir, job):
        """Execute every readable, executable script found in *scripts_dir*."""
        if not os.path.isdir(scripts_dir):
            if self.warn_non_existing_dir:
                LOG_UI.error("Directory configured to hold %s-job scripts "
                             "has not been found: %s", kind, scripts_dir)
            return

        # Keep only entries the current user can both read and execute,
        # and run them in sorted (deterministic) order.
        candidates = (os.path.join(scripts_dir, entry)
                      for entry in os.listdir(scripts_dir))
        runnable = sorted(path for path in candidates
                          if os.access(path, os.R_OK | os.X_OK))
        if not runnable:
            return

        env = self._job_to_environment_variables(job)
        for script in runnable:
            result = process.run(script, ignore_status=True, env=env)
            if result.exit_status != 0 and self.warn_non_zero_status:
                LOG_UI.error('%s job script "%s" exited with status "%i"',
                             kind.capitalize(), script, result.exit_status)
Exemple #17
0
    def _setup_job(job_id):
        """
        Resolve a job reference into its results directory and source job id.

        :param job_id: a results directory path, a file inside one, or a
                       (possibly partial) job id to look up in the
                       configured logs directory.
        :return: tuple ``(resultsdir, sourcejob)``
        :raise SystemExit: when the directory or job id cannot be resolved.
        """
        if os.path.isdir(job_id):
            # A results directory was given directly.
            resultsdir = os.path.expanduser(job_id)
            job_id = ''
        elif os.path.isfile(job_id):
            # A file inside a results directory was given; use its parent.
            resultsdir = os.path.dirname(os.path.expanduser(job_id))
            job_id = ''
        else:
            # Otherwise treat the argument as a job id and search the
            # configured logs directory for a matching results dir.
            logdir = settings.get_value(section='datadir.paths',
                                        key='logs_dir', key_type='path',
                                        default=None)
            try:
                resultsdir = jobdata.get_resultsdir(logdir, job_id)
            except ValueError as exception:
                LOG_UI.error(exception)
                sys.exit(exit_codes.AVOCADO_FAIL)

        # Only reachable with logdir bound: the dir/file branches above
        # always produce a non-None resultsdir.
        if resultsdir is None:
            LOG_UI.error("Can't find job results directory for '%s' in '%s'",
                         job_id, logdir)
            sys.exit(exit_codes.AVOCADO_FAIL)

        sourcejob = jobdata.get_id(os.path.join(resultsdir, 'id'), job_id)
        if sourcejob is None:
            LOG_UI.error("Can't find matching job id '%s' in '%s' directory.",
                         job_id, resultsdir)
            sys.exit(exit_codes.AVOCADO_FAIL)

        return resultsdir, sourcejob
Exemple #18
0
    def initialize(self, args):
        """
        Build the multiplex tree from YAML files and --mux-inject values.

        :param args: parsed command line arguments carrying the mux options.
        :raise SystemExit: when a YAML file cannot be read.
        :raise ValueError: on a malformed --mux-inject entry.
        """
        debug = getattr(args, "varianter_debug", False)

        if debug:
            data = mux.MuxTreeNodeDebug()
        else:
            data = mux.MuxTreeNode()

        # Merge the multiplex
        multiplex_files = getattr(args, "mux_yaml", None)
        if multiplex_files:
            try:
                data.merge(create_from_yaml(multiplex_files, debug))
            except IOError as details:
                error_msg = "%s : %s" % (details.strerror, details.filename)
                LOG_UI.error(error_msg)
                # A failure during "run" is a job failure; otherwise a
                # plain command failure.
                if args.subcommand == 'run':
                    sys.exit(exit_codes.AVOCADO_JOB_FAIL)
                else:
                    sys.exit(exit_codes.AVOCADO_FAIL)

        # Extend default multiplex tree of --mux-inject values
        for inject in getattr(args, "mux_inject", []):
            # Accepted forms: "key:value" (rooted at '') or "path:key:value";
            # split at most 3 times so the value itself may contain ':'.
            entry = inject.split(':', 3)
            if len(entry) < 2:
                raise ValueError("key:entry pairs required, found only %s"
                                 % (entry))
            elif len(entry) == 2:   # key, entry
                entry.insert(0, '')  # add path='' (root)
            data.get_node(entry[0], True).value[entry[1]] = entry[2]

        mux_filter_only = getattr(args, 'mux_filter_only', None)
        mux_filter_out = getattr(args, 'mux_filter_out', None)
        data = mux.apply_filters(data, mux_filter_only, mux_filter_out)
        # Only activate the varianter when the tree differs from an empty
        # one, i.e. something was actually merged or injected.
        if data != mux.MuxTreeNode():
            paths = getattr(args, "mux_parameter_paths", ["/run/*"])
            if paths is None:
                paths = ["/run/*"]
            self.initialize_mux(data, paths, debug)
Exemple #19
0
    def run(self, args):
        """
        Parse the varianter, print variants and/or a summary, then exit.

        :param args: command line arguments from the variants subparser.
        :raise SystemExit: always -- with a failure code on error, with
                           ``AVOCADO_ALL_OK`` on success.
        """
        err = None
        # --tree excludes --debug; --inherit requires --tree.
        if args.tree and args.varianter_debug:
            err = "Option --tree is incompatible with --debug."
        elif not args.tree and args.inherit:
            err = "Option --inherit can be only used with --tree"
        if err:
            LOG_UI.error(err)
            sys.exit(exit_codes.AVOCADO_FAIL)
        varianter = args.avocado_variants
        try:
            varianter.parse(args)
        except (IOError, ValueError) as details:
            LOG_UI.error("Unable to parse varianter: %s", details)
            sys.exit(exit_codes.AVOCADO_FAIL)
        use_utf8 = settings.get_value("runner.output", "utf8",
                                      key_type=bool, default=None)
        summary = args.summary or 0
        variants = args.variants or 0

        # Parse obsolete options (unsafe to combine them with new args).
        # --tree disables the variants listing and raises the summary level;
        # --contents / --inherit bump the respective verbosity levels.
        if args.tree:
            variants = 0
            summary += 1
            if args.contents:
                summary += 1
            if args.inherit:
                summary += 2
        else:
            if args.contents:
                variants += 2

        # Export the serialized avocado_variants
        if args.json_variants_dump is not None:
            try:
                with open(args.json_variants_dump, 'w') as variants_file:
                    json.dump(args.avocado_variants.dump(), variants_file)
            except IOError:
                LOG_UI.error("Cannot write %s", args.json_variants_dump)
                sys.exit(exit_codes.AVOCADO_FAIL)

        # Produce the output
        lines = args.avocado_variants.to_str(summary=summary,
                                             variants=variants,
                                             use_utf8=use_utf8)
        for line in lines.splitlines():
            LOG_UI.debug(line)

        sys.exit(exit_codes.AVOCADO_ALL_OK)
Exemple #20
0
    def render(self, result, job):
        """
        Write the HTML report and optionally open it in a browser.

        :param result: job result to render.
        :param job: the job, carrying the html options in its args.
        """
        if job.status == "RUNNING":
            return  # Don't create results on unfinished jobs
        if not (hasattr(job.args, 'html_job_result') or
                hasattr(job.args, 'html_output')):
            return

        open_browser = getattr(job.args, 'open_browser', False)
        if getattr(job.args, 'html_job_result', 'off') == 'on':
            html_path = os.path.join(job.logdir, 'results.html')
            self._render(result, html_path)
            if getattr(job.args, 'stdout_claimed_by', None) is None:
                LOG_UI.info("JOB HTML   : %s", html_path)
            if open_browser:
                self._open_browser(html_path)
                open_browser = False  # open at most one report

        # BUG FIX: the fallback used to be the *string* 'None', which passes
        # the "is not None" check, so a missing html_output attribute caused
        # a report literally named "None" to be written.  Use None instead.
        html_path = getattr(job.args, 'html_output', None)
        if html_path is not None:
            self._render(result, html_path)
            if open_browser:
                self._open_browser(html_path)
Exemple #21
0
 def pre_tests(self, job):
     """Print the job id, the source job id (for replays) and the log path."""
     if not self.owns_stdout:
         return
     LOG_UI.info("JOB ID     : %s", job.unique_id)
     src_job = getattr(job.args, "replay_sourcejob", False)
     if src_job:
         LOG_UI.info("SRC JOB ID : %s", src_job)
     LOG_UI.info("JOB LOG    : %s", job.logfile)
Exemple #22
0
    def run(self, args):
        """
        Print libexec path and finish

        :param args: Command line args received from the run subparser.
        :raise SystemExit: always -- AVOCADO_FAIL when no libexec path can
                           be located, AVOCADO_ALL_OK otherwise.
        """
        if 'VIRTUAL_ENV' in os.environ:
            LOG_UI.debug('libexec')
        elif os.path.exists('/usr/libexec/avocado'):
            LOG_UI.debug('/usr/libexec/avocado')
        elif os.path.exists('/usr/lib/avocado'):
            LOG_UI.debug('/usr/lib/avocado')
        else:
            # BUG FIX: os.environ.get('PATH') returns None when PATH is
            # unset, and None.split(':') raises AttributeError; default
            # to the empty string so the loop simply finds nothing.
            for path in os.environ.get('PATH', '').split(':'):
                if (os.path.exists(os.path.join(path, 'avocado')) and
                    os.path.exists(os.path.join(os.path.dirname(path),
                                                'libexec'))):
                    LOG_UI.debug(os.path.join(os.path.dirname(path), 'libexec'))
                    break
            else:
                # for/else: executed only when no break happened above
                LOG_UI.error("Can't locate avocado libexec path")
                sys.exit(exit_codes.AVOCADO_FAIL)
        return sys.exit(exit_codes.AVOCADO_ALL_OK)
Exemple #23
0
    def render(self, result, job):
        """
        Write the xunit (bytes) report to the logdir and/or a custom path.

        :param result: job result holding the test data to serialize.
        :param job: the job, carrying the xunit options in its args.
        """
        if not (hasattr(job.args, 'xunit_job_result') or
                hasattr(job.args, 'xunit_output')):
            return

        if not result.tests_total:
            return

        max_test_log_size = getattr(job.args, 'xunit_max_test_log_chars', None)
        job_name = getattr(job.args, 'xunit_job_name', None)
        content = self._render(result, max_test_log_size, job_name)
        if getattr(job.args, 'xunit_job_result', 'off') == 'on':
            xunit_path = os.path.join(job.logdir, 'results.xml')
            with open(xunit_path, 'wb') as xunit_file:
                xunit_file.write(content)

        # BUG FIX: the fallback used to be the *string* 'None', which passes
        # the "is not None" check, so a missing xunit_output attribute wrote
        # a file literally named "None".  Use the None singleton instead.
        xunit_path = getattr(job.args, 'xunit_output', None)
        if xunit_path is not None:
            if xunit_path == '-':
                LOG_UI.debug(content.decode('UTF-8'))
            else:
                with open(xunit_path, 'wb') as xunit_file:
                    xunit_file.write(content)
Exemple #24
0
    def initialize(self, args):
        """
        Validate the pict options and pre-compute the variants.

        Problems with the parameter file and with the pict binary are
        accumulated so the user sees all of them before the process exits.
        """
        self.variants = None
        error = False

        pict_parameter_file = getattr(args, "pict_parameter_file", None)
        if pict_parameter_file is None:
            # Plugin not requested for this invocation.
            return
        else:
            pict_parameter_file = os.path.expanduser(pict_parameter_file)
            if not os.access(pict_parameter_file, os.R_OK):
                LOG_UI.error("pict parameter file '%s' could not be found or "
                             "is not readable", pict_parameter_file)
                error = True

        pict_binary = getattr(args, "pict_binary", None)
        if pict_binary is None:
            LOG_UI.error("pict binary could not be found in $PATH. Please set "
                         "its location with --pict-binary or put it in your "
                         "$PATH")
            error = True
        else:
            pict_binary = os.path.expanduser(pict_binary)
            # The binary must be both readable and executable.
            if not os.access(pict_binary, os.R_OK | os.X_OK):
                LOG_UI.error("pict binary '%s' can not be executed, please check "
                             "the option given with --pict-binary and/or the file "
                             "permissions", pict_binary)
                error = True

        if error:
            # During "run" this is a job failure; otherwise a command failure.
            if args.subcommand == 'run':
                sys.exit(exit_codes.AVOCADO_JOB_FAIL)
            else:
                sys.exit(exit_codes.AVOCADO_FAIL)

        self.parameter_path = getattr(args, "pict_parameter_path")

        output = run_pict(pict_binary,
                          pict_parameter_file,
                          getattr(args, "pict_order_of_combinations"))
        self.headers, self.variants = parse_pict_output(output)
Exemple #25
0
    def _display(self, test_matrix, stats, tag_stats):
        """Print the discovered-tests table, plus summaries when verbose."""
        verbose = self.args.verbose
        header = None
        if verbose:
            header = (output.TERM_SUPPORT.header_str('Type'),
                      output.TERM_SUPPORT.header_str('Test'),
                      output.TERM_SUPPORT.header_str('Tag(s)'))

        for line in astring.iter_tabular_output(test_matrix, header=header,
                                                strip=True):
            LOG_UI.debug(line)

        if not verbose:
            return

        LOG_UI.info("")
        LOG_UI.info("TEST TYPES SUMMARY")
        LOG_UI.info("==================")
        for key in sorted(stats):
            LOG_UI.info("%s: %s", key.upper(), stats[key])

        if tag_stats:
            LOG_UI.info("")
            LOG_UI.info("TEST TAGS SUMMARY")
            LOG_UI.info("=================")
            for key in sorted(tag_stats):
                LOG_UI.info("%s: %s", key, tag_stats[key])
Exemple #26
0
 def list(self):
     """
     Run the listing, turning a user interrupt into a failure exit code.

     :return: ``exit_codes.AVOCADO_FAIL`` on KeyboardInterrupt,
              otherwise whatever falls through (None).
     """
     try:
         self._list()
     except KeyboardInterrupt:
         LOG_UI.error('Command interrupted by user...')
         return exit_codes.AVOCADO_FAIL
Exemple #27
0
 def handle(self, message, task, job):
     """Decode a task's log message and echo it to the UI, prefixed by id."""
     encoding = message.get('encoding', 'utf-8')
     # local renamed from "output" to avoid shadowing the output module
     text = message['log'].decode(encoding)
     task_id = TestID.from_identifier(task.identifier)
     LOG_UI.debug("%s: %s" % (task_id, text))
Exemple #28
0
    def _display_extra(suite, verbose=True):
        """Display extra data when in verbose mode."""
        if not verbose:
            return

        if suite.resolutions:
            resolution_header = (TERM_SUPPORT.header_str("Resolver"),
                                 TERM_SUPPORT.header_str("Reference"),
                                 TERM_SUPPORT.header_str("Info"))
            LOG_UI.info("")

            # Only failed/unresolved references get a row; success is
            # skipped below, so its decorator is never used.
            decorators = {
                ReferenceResolutionResult.SUCCESS: TERM_SUPPORT.healthy_str,
                ReferenceResolutionResult.NOTFOUND: TERM_SUPPORT.fail_header_str,
                ReferenceResolutionResult.ERROR: TERM_SUPPORT.fail_header_str,
            }
            rows = []
            for resolution in suite.resolutions:
                if resolution.result == ReferenceResolutionResult.SUCCESS:
                    continue
                decorate = decorators.get(resolution.result,
                                          TERM_SUPPORT.warn_header_str)
                rows.append((decorate(resolution.origin),
                             resolution.reference,
                             resolution.info or ""))

            for line in iter_tabular_output(rows,
                                            header=resolution_header,
                                            strip=True):
                LOG_UI.info(line)

        LOG_UI.info("")
        LOG_UI.info("TEST TYPES SUMMARY")
        LOG_UI.info("==================")
        for key in sorted(suite.stats):
            LOG_UI.info("%s: %s", key, suite.stats[key])

        if suite.tags_stats:
            LOG_UI.info("")
            LOG_UI.info("TEST TAGS SUMMARY")
            LOG_UI.info("=================")
            for key in sorted(suite.tags_stats):
                LOG_UI.info("%s: %s", key, suite.tags_stats[key])
Exemple #29
0
    def _display(suite, matrix):
        """
        Print the suite's test matrix, plus resolution and summary tables
        when the core.verbose config option is enabled.

        :param suite: the test suite providing config, resolutions and stats.
        :param matrix: rows of (type, test, tags) already prepared for output.
        """
        header = None
        verbose = suite.config.get('core.verbose')
        if verbose:
            header = (TERM_SUPPORT.header_str('Type'),
                      TERM_SUPPORT.header_str('Test'),
                      TERM_SUPPORT.header_str('Tag(s)'))

        for line in iter_tabular_output(matrix, header=header, strip=True):
            LOG_UI.debug(line)

        if verbose:
            if suite.resolutions:
                resolution_header = (TERM_SUPPORT.header_str('Resolver'),
                                     TERM_SUPPORT.header_str('Reference'),
                                     TERM_SUPPORT.header_str('Info'))
                LOG_UI.info("")

                # Maps a resolution result to the string decorator used for
                # its row; unknown results fall back to the warning style.
                mapping = {
                    ReferenceResolutionResult.SUCCESS:
                    TERM_SUPPORT.healthy_str,
                    ReferenceResolutionResult.NOTFOUND:
                    TERM_SUPPORT.fail_header_str,
                    ReferenceResolutionResult.ERROR:
                    TERM_SUPPORT.fail_header_str
                }
                resolution_matrix = []
                for r in suite.resolutions:
                    decorator = mapping.get(r.result,
                                            TERM_SUPPORT.warn_header_str)
                    # Successful resolutions are not listed.
                    if r.result == ReferenceResolutionResult.SUCCESS:
                        continue
                    resolution_matrix.append(
                        (decorator(r.origin), r.reference, r.info or ''))

                for line in iter_tabular_output(resolution_matrix,
                                                header=resolution_header,
                                                strip=True):
                    LOG_UI.info(line)

            LOG_UI.info("")
            LOG_UI.info("TEST TYPES SUMMARY")
            LOG_UI.info("==================")
            for key in sorted(suite.stats):
                LOG_UI.info("%s: %s", key, suite.stats[key])

            if suite.tags_stats:
                LOG_UI.info("")
                LOG_UI.info("TEST TAGS SUMMARY")
                LOG_UI.info("=================")
                for key in sorted(suite.tags_stats):
                    LOG_UI.info("%s: %s", key, suite.tags_stats[key])
Exemple #30
0
 def post(self, job):
     """Print the job's wall-clock time when it passed and stdout is free."""
     if job.status != 'PASS':
         return
     if job.config.get('stdout_claimed_by', None):
         return
     LOG_UI.info("JOB TIME   : %.2f s", job.time_elapsed)
Exemple #31
0
 def post(self, job):
     """Print the job's wall-clock time when it passed and stdout is free."""
     if job.status != 'PASS':
         return
     if getattr(job.args, 'stdout_claimed_by', None):
         return
     LOG_UI.info("JOB TIME   : %.2f s", job.time_elapsed)
Exemple #32
0
    def run(self, config):
        def _get_name(test):
            return str(test['id'])

        def _get_name_no_id(test):
            return str(test['id']).split('-', 1)[1]

        job1_dir, job1_id = self._setup_job(config.get('diff.jobids')[0])
        job2_dir, job2_id = self._setup_job(config.get('diff.jobids')[1])

        job1_data = self._get_job_data(job1_dir)
        job2_data = self._get_job_data(job2_dir)

        report_header = 'Avocado Job Report\n'
        job1_results = [report_header]
        job2_results = [report_header]

        diff_filter = config.get('diff.filter')
        if 'cmdline' in diff_filter:
            cmdline1 = self._get_command_line(job1_dir)
            cmdline2 = self._get_command_line(job2_dir)

            if str(cmdline1) != str(cmdline2):
                command_line_header = ['\n',
                                       '# COMMAND LINE\n']
                job1_results.extend(command_line_header)
                job1_results.append(cmdline1)
                job2_results.extend(command_line_header)
                job2_results.append(cmdline2)

        if 'time' in diff_filter:
            time1 = '%.2f s\n' % job1_data['time']
            time2 = '%.2f s\n' % job2_data['time']

            if str(time1) != str(time2):
                total_time_header = ['\n',
                                     '# TOTAL TIME\n']
                job1_results.extend(total_time_header)
                job1_results.append(time1)
                job2_results.extend(total_time_header)
                job2_results.append(time2)

        if 'variants' in diff_filter:
            variants1 = self._get_variants(job1_dir)
            variants2 = self._get_variants(job2_dir)

            if str(variants1) != str(variants2):
                variants_header = ['\n',
                                   '# VARIANTS\n']
                job1_results.extend(variants_header)
                job1_results.extend(variants1)
                job2_results.extend(variants_header)
                job2_results.extend(variants2)

        if 'results' in diff_filter:
            results1 = []
            if config.get('diff.strip_id'):
                get_name = _get_name_no_id
            else:
                get_name = _get_name
            for test in job1_data['tests']:
                test_result = '%s: %s\n' % (get_name(test),
                                            str(test['status']))
                results1.append(test_result)
            results2 = []
            for test in job2_data['tests']:
                test_result = '%s: %s\n' % (get_name(test),
                                            str(test['status']))
                results2.append(test_result)

            if str(results1) != str(results2):
                test_results_header = ['\n',
                                       '# TEST RESULTS\n']
                job1_results.extend(test_results_header)
                job1_results.extend(results1)
                job2_results.extend(test_results_header)
                job2_results.extend(results2)

        if 'config' in diff_filter:
            config1 = self._get_config(job1_dir)
            config2 = self._get_config(job2_dir)

            if str(config1) != str(config2):
                config_header = ['\n',
                                 '# SETTINGS\n']
                job1_results.extend(config_header)
                job1_results.extend(config1)
                job2_results.extend(config_header)
                job2_results.extend(config2)

        if 'sysinfo' in diff_filter:
            sysinfo_pre1 = self._get_sysinfo(job1_dir, 'pre')
            sysinfo_pre2 = self._get_sysinfo(job2_dir, 'pre')

            if str(sysinfo_pre1) != str(sysinfo_pre2):
                sysinfo_header_pre = ['\n',
                                      '# SYSINFO PRE\n']
                job1_results.extend(sysinfo_header_pre)
                job1_results.extend(sysinfo_pre1)
                job2_results.extend(sysinfo_header_pre)
                job2_results.extend(sysinfo_pre2)

            sysinfo_post1 = self._get_sysinfo(job1_dir, 'post')
            sysinfo_post2 = self._get_sysinfo(job2_dir, 'post')

            if str(sysinfo_post1) != str(sysinfo_post2):
                sysinfo_header_post = ['\n',
                                       '# SYSINFO POST\n']
                job1_results.extend(sysinfo_header_post)
                job1_results.extend(sysinfo_post1)
                job2_results.extend(sysinfo_header_post)
                job2_results.extend(sysinfo_post2)

        if config.get('diff.create_reports'):
            self.std_diff_output = False
            prefix = 'avocado_diff_%s_' % job1_id[:7]
            tmp_file1 = tempfile.NamedTemporaryFile(mode='w',
                                                    prefix=prefix,
                                                    suffix='.txt',
                                                    delete=False)
            tmp_file1.writelines(job1_results)
            tmp_file1.close()

            prefix = 'avocado_diff_%s_' % job2_id[:7]
            tmp_file2 = tempfile.NamedTemporaryFile(mode='w',
                                                    prefix=prefix,
                                                    suffix='.txt',
                                                    delete=False)
            tmp_file2.writelines(job2_results)
            tmp_file2.close()

            LOG_UI.info('%s %s', tmp_file1.name, tmp_file2.name)

        html_file = config.get('diff.html')
        open_browser = config.get('diff.open_browser')
        if open_browser and html_file is None:
            prefix = 'avocado_diff_%s_%s_' % (job1_id[:7], job2_id[:7])
            tmp_file = tempfile.NamedTemporaryFile(mode='w',
                                                   prefix=prefix,
                                                   suffix='.html',
                                                   delete=False)

            html_file = tmp_file.name

        if html_file is not None:
            self.std_diff_output = False
            try:
                html_diff = HtmlDiff()
                # pylint: disable=W0212
                html_diff._legend = """
                    <table class="diff" summary="Legends">
                    <tr> <td> <table border="" summary="Colors">
                    <tr><th> Colors </th> </tr>
                    <tr><td class="diff_add">&nbsp;Added&nbsp;</td></tr>
                    <tr><td class="diff_chg">Changed</td> </tr>
                    <tr><td class="diff_sub">Deleted</td> </tr>
                    </table></td>
                    <td> <table border="" summary="Links">
                    <tr><th colspan="2"> Links </th> </tr>
                    <tr><td>(f)irst change</td> </tr>
                    <tr><td>(n)ext change</td> </tr>
                    <tr><td>(t)op</td> </tr>
                    </table></td> </tr>
                    </table>"""

                job_diff_html = html_diff.make_file((_ for _ in job1_results),
                                                    (_ for _ in job2_results),
                                                    fromdesc=job1_id,
                                                    todesc=job2_id)

                with open(html_file, 'w', encoding='utf-8') as fp:
                    fp.writelines(job_diff_html)
                LOG_UI.info(html_file)

            except IOError as exception:
                LOG_UI.error(exception)
                sys.exit(exit_codes.AVOCADO_FAIL)

        if open_browser:
            setsid = getattr(os, 'setsid', None)
            if not setsid:
                setsid = getattr(os, 'setpgrp', None)
            with open(os.devnull, "r+", encoding='utf-8') as inout:
                cmd = ['xdg-open', html_file]
                subprocess.Popen(cmd, close_fds=True, stdin=inout,  # pylint: disable=W1509
                                 stdout=inout, stderr=inout,
                                 preexec_fn=setsid)

        if self.std_diff_output:
            if self.term.enabled:
                for line in self._cdiff(unified_diff(job1_results,
                                                     job2_results,
                                                     fromfile=job1_id,
                                                     tofile=job2_id)):
                    LOG_UI.debug(line.strip())
            else:
                for line in unified_diff(job1_results,
                                         job2_results,
                                         fromfile=job1_id,
                                         tofile=job2_id):
                    LOG_UI.debug(line.strip())
Exemple #33
0
    def run(self, config):
        """Resolve test references and execute them with the nrunner architecture.

        :param config: the job configuration (dict-like settings)
        :return: an exit code combining the test results, or AVOCADO_FAIL
                 when an unexpected error occurs
        """
        # An optional ".avocado.hint" file in the current directory can
        # customize how test references are resolved
        hint_filepath = '.avocado.hint'
        hint = None
        if os.path.exists(hint_filepath):
            hint = HintParser(hint_filepath)
        resolutions = resolver.resolve(config.get('nrun.references'), hint)
        tasks = job.resolutions_to_tasks(resolutions, config)
        # pylint: disable=W0201
        self.pending_tasks, missing_requirements = nrunner.check_tasks_requirements(tasks)
        if missing_requirements:
            missing_tasks_msg = "\n".join([str(t) for t in missing_requirements])
            LOG_UI.warning('Tasks will not be run due to missing requirements: %s',
                           missing_tasks_msg)

        if not self.pending_tasks:
            LOG_UI.error('No test to be executed, exiting...')
            sys.exit(exit_codes.AVOCADO_JOB_FAIL)

        # Assign sequential test IDs before any shuffling, so identifiers
        # stay stable regardless of execution order
        for index, task in enumerate(self.pending_tasks, start=1):
            task.identifier = str(TestID(index, task.runnable.uri))

        # Randomized execution order is the default; opt out via config
        if not config.get('nrun.disable_task_randomization'):
            random.shuffle(self.pending_tasks)

        self.spawned_tasks = []  # pylint: disable=W0201

        try:
            if config.get('nrun.spawner') == 'podman':
                # Fail early with a clear message if podman is not installed
                if not os.path.exists(PodmanSpawner.PODMAN_BIN):
                    msg = ('Podman Spawner selected, but podman binary "%s" '
                           'is not available on the system.  Please install '
                           'podman before attempting to use this feature.')
                    msg %= PodmanSpawner.PODMAN_BIN
                    LOG_UI.error(msg)
                    sys.exit(exit_codes.AVOCADO_JOB_FAIL)
                self.spawner = PodmanSpawner()  # pylint: disable=W0201
            elif config.get('nrun.spawner') == 'process':
                self.spawner = ProcessSpawner()  # pylint: disable=W0201
            else:
                LOG_UI.error("Spawner not implemented or invalid.")
                sys.exit(exit_codes.AVOCADO_JOB_FAIL)

            # The status server collects results reported by spawned tasks
            listen = config.get('nrun.status_server.listen')
            verbose = config.get('core.verbose')
            self.status_server = nrunner.StatusServer(listen,  # pylint: disable=W0201
                                                      [t.identifier for t in
                                                       self.pending_tasks],
                                                      verbose)
            self.status_server.start()
            parallel_tasks = config.get('nrun.parallel_tasks')
            loop = asyncio.get_event_loop()
            loop.run_until_complete(self.spawn_tasks(parallel_tasks))
            loop.run_until_complete(self.status_server.wait())
            self.report_results()
            # Any failed or errored test marks the whole job as failed
            exit_code = exit_codes.AVOCADO_ALL_OK
            if self.status_server.result.get('fail') is not None:
                exit_code |= exit_codes.AVOCADO_TESTS_FAIL
            elif self.status_server.result.get('error') is not None:
                exit_code |= exit_codes.AVOCADO_TESTS_FAIL
            return exit_code
        except Exception as e:  # pylint: disable=W0703
            LOG_UI.error(e)
            return exit_codes.AVOCADO_FAIL
Exemple #34
0
 def render(self, result, job):
     """Log the job's ResultsDB URL, unless stdout is claimed elsewhere."""
     logs_url = getattr(job.args, 'resultsdb_logs', None)
     if logs_url is None:
         return
     if getattr(job.args, 'stdout_claimed_by', None) is not None:
         return
     LOG_UI.info("JOB URL    : %s/%s", logs_url,
                 os.path.basename(job.logdir))
Exemple #35
0
    def handle_default():
        """Display the configuration files read and the effective settings.

        When no custom data directory is configured, every Section.Key/Value
        pair is printed; otherwise, the effective Avocado data directories
        are shown instead.
        """
        LOG_UI.info("Config files read (in order, '*' means the file exists "
                    "and had been read):")

        # Getting from settings because is already sorted
        config = settings.as_dict()
        for cfg_path in settings.all_config_paths:
            if cfg_path in settings.config_paths:
                LOG_UI.debug('    * %s', cfg_path)
            else:
                LOG_UI.debug('      %s', cfg_path)
        LOG_UI.debug("")
        if not config.get('config.datadir'):
            # Width of the first column: the longest namespace (0 if empty).
            # Iterating the dict directly yields the namespaces; no need to
            # walk .items() just to measure keys.
            blength = max((len(namespace) for namespace in config), default=0)

            format_str = "    %-" + str(blength) + "s %s"

            LOG_UI.debug(format_str, 'Section.Key', 'Value')
            for namespace, value in config.items():
                LOG_UI.debug(format_str, namespace, value)
        else:
            cache_dirs = config.get('datadir.paths.cache_dirs')
            LOG_UI.debug("Avocado replaces config dirs that can't be accessed")
            LOG_UI.debug("with sensible defaults. Please edit your local config")
            LOG_UI.debug("file to customize values")
            LOG_UI.debug('')
            LOG_UI.info('Avocado Data Directories:')
            LOG_UI.debug('   base    %s', config.get('datadir.paths.base_dir'))
            LOG_UI.debug('   tests   %s', data_dir.get_test_dir())
            LOG_UI.debug('   data    %s', config.get('datadir.paths.data_dir'))
            LOG_UI.debug('   logs    %s', config.get('datadir.paths.logs_dir'))
            LOG_UI.debug('   cache   %s', ", ".join(cache_dirs))
Exemple #36
0
 def sleep(self, job):  # pylint: disable=W0613
     """Sleep for the configured number of seconds, logging progress each second."""
     total = job.config.get('plugins.job.sleep.seconds')
     elapsed = 0
     while elapsed < total:
         elapsed += 1
         LOG_UI.info("Sleeping %2i/%s", elapsed, total)
         time.sleep(1)
Exemple #37
0
 def run(self, args):
     """Abort early when HTML output is requested on stdout, which is unsupported."""
     wants_stdout = 'html_output' in args and args.html_output == '-'
     if wants_stdout:
         LOG_UI.error('HTML to stdout not supported (not all HTML resources'
                      ' can be embedded on a single file)')
         sys.exit(exit_codes.AVOCADO_JOB_FAIL)
Exemple #38
0
    def _render(self, result, output_path):
        """Render the HTML report for ``result`` into ``output_path``."""
        context = ReportModel(result=result, html_output=output_path)
        template = pkg_resources.resource_string(
            'avocado_result_html', 'resources/templates/report.mustache')

        # pylint: disable=E0611
        try:
            if hasattr(pystache, 'Renderer'):
                report_contents = pystache.Renderer('utf-8', 'utf-8').render(
                    template, context)
            else:
                # Very old pystache releases only ship the "view" API
                from pystache import view
                report_contents = view.View(template, context).render('utf8')
        except UnicodeDecodeError as details:
            # FIXME: Remove me when UnicodeDecodeError problem is fixed
            separator = "-" * 80
            LOG_UI.critical("\n" + separator)
            LOG_UI.critical("HTML failed to render the template: %s\n\n",
                            template)
            LOG_UI.critical(separator)
            LOG_UI.critical("%s:\n\n", details)
            LOG_UI.critical("%r", getattr(details, "object",
                                          "object not found"))
            LOG_UI.critical(separator)
            raise

        with codecs.open(output_path, 'w', 'utf-8') as report_file:
            report_file.write(report_contents)
Exemple #39
0
    def run(self, args):
        """Display the configuration files read and the effective settings.

        Without a custom data directory, every Section.Key/Value pair is
        printed; otherwise, the Avocado data directories are shown.
        """
        LOG_UI.info('Config files read (in order):')
        for cfg_path in settings.config_paths:
            # Lazy %-style args: let the logging framework do the formatting
            LOG_UI.debug('    %s', cfg_path)
        if settings.config_paths_failed:
            LOG_UI.error('\nConfig files that failed to read:')
            for cfg_path in settings.config_paths_failed:
                LOG_UI.error('    %s', cfg_path)
        LOG_UI.debug("")
        if not args.datadir:
            # Width of the first column: the longest "section.key" (0 if none)
            blength = max((len('%s.%s' % (section, key))
                           for section in settings.config.sections()
                           for key, _ in settings.config.items(section)),
                          default=0)

            format_str = "    %-" + str(blength) + "s %s"

            LOG_UI.debug(format_str, 'Section.Key', 'Value')
            for section in settings.config.sections():
                for value in settings.config.items(section):
                    config_key = ".".join((section, value[0]))
                    LOG_UI.debug(format_str, config_key, value[1])
        else:
            LOG_UI.debug("Avocado replaces config dirs that can't be accessed")
            LOG_UI.debug("with sensible defaults. Please edit your local config")
            LOG_UI.debug("file to customize values")
            LOG_UI.debug('')
            LOG_UI.info('Avocado Data Directories:')
            LOG_UI.debug('    base     %s', data_dir.get_base_dir())
            LOG_UI.debug('    tests    %s', data_dir.get_test_dir())
            LOG_UI.debug('    data     %s', data_dir.get_data_dir())
            LOG_UI.debug('    logs     %s', data_dir.get_logs_dir())
            LOG_UI.debug('    cache    %s', ", ".join(data_dir.get_cache_dirs()))
Exemple #40
0
    def run(self, args):
        """Prepare a replay job from a previously-run job's saved data.

        Locates the source job's results directory, validates option
        compatibility, and populates ``args`` with the replayed test
        references, variants, configuration and test-status map.
        Exits with AVOCADO_FAIL on any unrecoverable inconsistency.
        """
        # Only act when a replay was actually requested
        if getattr(args, 'replay_jobid', None) is None:
            return

        # Reject option combinations that cannot be honored together
        err = None
        if args.replay_teststatus and 'variants' in args.replay_ignore:
            err = ("Option `--replay-test-status` is incompatible with "
                   "`--replay-ignore variants`.")
        elif args.replay_teststatus and args.reference:
            err = ("Option --replay-test-status is incompatible with "
                   "test references given on the command line.")
        elif getattr(args, "remote_hostname", False):
            err = "Currently we don't replay jobs in remote hosts."
        if err is not None:
            LOG_UI.error(err)
            sys.exit(exit_codes.AVOCADO_FAIL)

        # Locate the results directory of the source job
        base_logdir = getattr(args, 'base_logdir', None)
        if base_logdir is None:
            base_logdir = settings.get_value(section='datadir.paths',
                                             key='logs_dir',
                                             key_type='path',
                                             default=None)
        try:
            resultsdir = jobdata.get_resultsdir(base_logdir, args.replay_jobid)
        except ValueError as exception:
            LOG_UI.error(exception.message)
            sys.exit(exit_codes.AVOCADO_FAIL)

        if resultsdir is None:
            LOG_UI.error("Can't find job results directory in '%s'",
                         base_logdir)
            sys.exit(exit_codes.AVOCADO_FAIL)

        # Resolve the (possibly partial) job id to the full source job id
        sourcejob = jobdata.get_id(os.path.join(resultsdir, 'id'),
                                   args.replay_jobid)
        if sourcejob is None:
            msg = ("Can't find matching job id '%s' in '%s' directory." %
                   (args.replay_jobid, resultsdir))
            LOG_UI.error(msg)
            sys.exit(exit_codes.AVOCADO_FAIL)
        setattr(args, 'replay_sourcejob', sourcejob)

        # Re-apply whitelisted options from the source job, unless they were
        # explicitly given on the current command line (which takes priority)
        replay_args = jobdata.retrieve_args(resultsdir)
        whitelist = [
            'loaders', 'external_runner', 'external_runner_testdir',
            'external_runner_chdir', 'failfast', 'ignore_missing_references',
            'execution_order'
        ]
        if replay_args is None:
            LOG_UI.warn(
                'Source job args data not found. These options will '
                'not be loaded in this replay job: %s', ', '.join(whitelist))
        else:
            for option in whitelist:
                optvalue = getattr(args, option, None)
                if optvalue is not None:
                    LOG_UI.warn(
                        "Overriding the replay %s with the --%s value "
                        "given on the command line.", option.replace('_', '-'),
                        option.replace('_', '-'))
                elif option in replay_args:
                    setattr(args, option, replay_args[option])

        # Test references: command-line ones win over the source job's
        if getattr(args, 'reference', None):
            LOG_UI.warn('Overriding the replay test references with test '
                        'references given in the command line.')
        else:
            references = jobdata.retrieve_references(resultsdir)
            if references is None:
                LOG_UI.error('Source job test references data not found. '
                             'Aborting.')
                sys.exit(exit_codes.AVOCADO_FAIL)
            else:
                setattr(args, 'reference', references)

        # Configuration from the source job, unless explicitly ignored
        if 'config' in args.replay_ignore:
            LOG_UI.warn("Ignoring configuration from source job with "
                        "--replay-ignore.")
        else:
            self.load_config(resultsdir)

        # Variants (multiplex) data from the source job, unless ignored
        if 'variants' in args.replay_ignore:
            LOG_UI.warn("Ignoring variants from source job with "
                        "--replay-ignore.")
        else:
            variants = jobdata.retrieve_variants(resultsdir)
            if variants is None:
                LOG_UI.error('Source job variants data not found. Aborting.')
                sys.exit(exit_codes.AVOCADO_FAIL)
            else:
                LOG_UI.warning("Using src job Mux data only, use "
                               "`--replay-ignore variants` to override "
                               "them.")
                setattr(args, "avocado_variants", variants)

        # Extend "replay_test_status" of "INTERRUPTED" when --replay-resume
        # supplied.
        if args.replay_resume:
            if not args.replay_teststatus:
                args.replay_teststatus = ["INTERRUPTED"]
            elif "INTERRUPTED" not in args.replay_teststatus:
                args.replay_teststatus.append("INTERRUPTED")
        if args.replay_teststatus:
            replay_map = self._create_replay_map(resultsdir,
                                                 args.replay_teststatus)
            setattr(args, 'replay_map', replay_map)

        # Use the original directory to resolve test references properly
        pwd = jobdata.retrieve_pwd(resultsdir)
        if pwd is not None:
            if os.path.exists(pwd):
                os.chdir(pwd)
            else:
                LOG_UI.warn(
                    "Directory used in the replay source job '%s' does"
                    " not exist, using '.' instead", pwd)
Exemple #41
0
 def _print_job_details(self, details):
     """Log each job detail as an aligned ``key: value`` line."""
     for detail_key in details:
         LOG_UI.info("%-12s: %s", detail_key, details[detail_key])
Exemple #42
0
    def initialize(self, args):
        """Build the multiplex variant tree from command-line arguments.

        Handles the deprecated --filter-only/--filter-out and --multiplex
        options, merges --mux-yaml files, applies --mux-inject entries and
        filters, then initializes the multiplexer when the tree has content.
        """
        # Deprecated filters
        only = getattr(args, "filter_only", None)
        if only:
            self._log_deprecation_msg("--filter-only", "--mux-filter-only")
            mux_filter_only = getattr(args, "mux_filter_only")
            if mux_filter_only:
                args.mux_filter_only = mux_filter_only + only
            else:
                args.mux_filter_only = only
        out = getattr(args, "filter_out", None)
        if out:
            self._log_deprecation_msg("--filter-out", "--mux-filter-out")
            mux_filter_out = getattr(args, "mux_filter_out")
            if mux_filter_out:
                args.mux_filter_out = mux_filter_out + out
            else:
                args.mux_filter_out = out

        debug = getattr(args, "mux_debug", False)
        if debug:
            data = mux.MuxTreeNodeDebug()
        else:
            data = mux.MuxTreeNode()

        # Merge the multiplex
        multiplex_files = getattr(args, "mux_yaml", None)
        if multiplex_files:
            try:
                data.merge(create_from_yaml(multiplex_files, debug))
            except IOError as details:
                error_msg = "%s : %s" % (details.strerror, details.filename)
                LOG_UI.error(error_msg)
                if args.subcommand == 'run':
                    sys.exit(exit_codes.AVOCADO_JOB_FAIL)
                else:
                    sys.exit(exit_codes.AVOCADO_FAIL)

        # Deprecated --multiplex option
        multiplex_files = getattr(args, "multiplex", None)
        if multiplex_files:
            self._log_deprecation_msg("--multiplex", "--mux-yaml")
            try:
                # NOTE(review): the files are parsed twice and merged into
                # both "data" and "args.avocado_variants" -- presumably a
                # backward-compatibility measure, but confirm upstream
                data.merge(create_from_yaml(multiplex_files, debug))
                from_yaml = create_from_yaml(multiplex_files, debug)
                args.avocado_variants.data_merge(from_yaml)
            except IOError as details:
                error_msg = "%s : %s" % (details.strerror, details.filename)
                LOG_UI.error(error_msg)
                if args.subcommand == 'run':
                    sys.exit(exit_codes.AVOCADO_JOB_FAIL)
                else:
                    sys.exit(exit_codes.AVOCADO_FAIL)

        # Extend default multiplex tree of --mux-inject values
        for inject in getattr(args, "mux_inject", []):
            # Expected forms: "key:value" (injected at the root) or
            # "path:key:value"; NOTE(review): with maxsplit=3 a value that
            # itself contains ':' gets truncated -- confirm that is intended
            entry = inject.split(':', 3)
            if len(entry) < 2:
                raise ValueError("key:entry pairs required, found only %s"
                                 % (entry))
            elif len(entry) == 2:   # key, entry
                entry.insert(0, '')  # add path='' (root)
            data.get_node(entry[0], True).value[entry[1]] = entry[2]

        mux_filter_only = getattr(args, 'mux_filter_only', None)
        mux_filter_out = getattr(args, 'mux_filter_out', None)
        data = mux.apply_filters(data, mux_filter_only, mux_filter_out)
        # Only initialize the multiplexer when the tree carries any content
        # (an empty tree compares equal to a fresh MuxTreeNode)
        if data != mux.MuxTreeNode():
            mux_path = getattr(args, "mux_path", ["/run/*"])
            if mux_path is None:
                mux_path = ["/run/*"]
            self.initialize_mux(data, mux_path, debug)
Exemple #43
0
 def render(self, result, job):
     """Log the job's ResultsDB URL, unless stdout is claimed by another plugin."""
     if (getattr(job.args, 'resultsdb_logs', None) is not None and
             getattr(job.args, 'stdout_claimed_by', None) is None):
         # Lazy %-style args instead of eager "%" formatting, matching the
         # sibling render() implementation and letting logging defer the work
         LOG_UI.info("JOB URL    : %s/%s", job.args.resultsdb_logs,
                     os.path.basename(job.logdir))
Exemple #44
0
 def list(self):
     """Run the listing, mapping a user interrupt to a failure exit code."""
     try:
         self._list()
     except KeyboardInterrupt:
         LOG_UI.error('Command interrupted by user...')
         return exit_codes.AVOCADO_FAIL
     # Success path: no explicit exit code (implicitly None)
     return None
Exemple #45
0
 def run(self, config):
     """Abort when HTML output is directed to stdout, which is unsupported."""
     # config.get() returns None for a missing key, so the previous
     # separate membership test was redundant
     if config.get('html_output') == '-':
         LOG_UI.error('HTML to stdout not supported (not all HTML resources'
                      ' can be embedded on a single file)')
         sys.exit(exit_codes.AVOCADO_JOB_FAIL)
def update(config, tag=""):
    """
    Update all states (run all tests) from the state defined as
    ``from_state=<state>`` to the state defined as ``to_state=<state>``.

    :param config: command line arguments and run configuration
    :type config: {str, str}
    :param str tag: extra name identifier for the test to be run

    Thus, a change in a state can be reflected in all the dependent states.

    Only singleton test setup is supported within the update setup path since
    we cannot guarantee other setup involved vms exist.

    .. note:: If you want to update the install state, you also need to change the default
        'from_state=install' to 'from_state=root'. You cannot update the root as this is
        analogical to running the full manual step.
    """
    l, r = config["graph"].l, config["graph"].r
    selected_vms = sorted(config["vm_strs"].keys())
    LOG_UI.info("Starting update setup for %s (%s)",
                ", ".join(selected_vms), os.path.basename(r.job.logdir))

    # Each vm is updated independently of the others
    for vm_name in selected_vms:
        vm_params = config["vms_params"].object_params(vm_name)
        from_state = vm_params.get("from_state", "install")
        to_state = vm_params.get("to_state", "customize")
        if to_state == "install":
            logging.warning("The root install state of %s cannot be updated - use 'setup=full' instead.", vm_name)
            continue
        logging.info("Updating state '%s' of %s", to_state, vm_name)

        logging.info("Tracing and removing all old states depending on the updated '%s'...", to_state)
        setup_dict = config["param_dict"].copy()
        setup_dict["unset_mode"] = "fi"
        setup_str = vm_params.get("remove_set", "leaves")
        # Prefix with "all.." unless the string already names one of the
        # available restrictions (for/else: no break means no match found)
        for restriction in config["available_restrictions"]:
            if restriction in setup_str:
                break
        else:
            setup_str = "all.." + setup_str
        setup_str = param.re_str(setup_str)
        # remove all test nodes depending on the updated node if present (unset mode is "ignore otherwise")
        remove_graph = l.parse_object_trees(setup_dict,
                                            setup_str,
                                            config["available_vms"],
                                            prefix=tag, verbose=False)
        remove_graph.flag_children(flag_type="run", flag=False)
        remove_graph.flag_children(flag_type="clean", flag=False, skip_roots=True)
        remove_graph.flag_children(to_state, vm_name, flag_type="clean", flag=True, skip_roots=True)
        r.run_traversal(remove_graph, {"vms": vm_name, **config["param_dict"]})

        logging.info("Updating all states before '%s'", to_state)
        setup_dict = config["param_dict"].copy()
        setup_dict["vms"] = vm_name
        # NOTE: this makes sure that no new states are created and the updated
        # states are not removed, aborting in any other case
        setup_dict.update({"get_mode": "ra", "set_mode": "fa", "unset_mode": "ra"})
        update_graph = l.parse_object_trees(setup_dict,
                                            param.re_str("all.." + to_state),
                                            {vm_name: config["vm_strs"][vm_name]}, prefix=tag)
        update_graph.flag_parent_intersection(update_graph, flag_type="run", flag=False)
        update_graph.flag_parent_intersection(update_graph, flag_type="run", flag=True,
                                              skip_object_roots=True, skip_shared_root=True)
        logging.info("Preserving all states before '%s'", from_state)
        if from_state != "install":
            setup_dict = config["param_dict"].copy()
            setup_dict["vms"] = vm_name
            reuse_graph = l.parse_object_trees(setup_dict,
                                               param.re_str("all.." + from_state),
                                               {vm_name: config["vm_strs"][vm_name]},
                                               prefix=tag, verbose=False)
            update_graph.flag_parent_intersection(reuse_graph, flag_type="run", flag=False)
        r.run_traversal(update_graph, setup_dict)

    LOG_UI.info("Finished update setup")
Exemple #47
0
 def run(self, config):
     """Emit the configured hello message to the UI log."""
     LOG_UI.info(config.get('hello.message'))
Exemple #48
0
    def _display(self, test_matrix, stats, tag_stats):
        """Print the test matrix and, in verbose mode, summary statistics."""
        verbose = self.args.verbose
        header = None
        if verbose:
            make_header = output.TERM_SUPPORT.header_str
            header = (make_header('Type'),
                      make_header('Test'),
                      make_header('Tag(s)'))

        for row in astring.iter_tabular_output(test_matrix,
                                               header=header,
                                               strip=True):
            LOG_UI.debug(row)

        if not verbose:
            return

        LOG_UI.info("")
        LOG_UI.info("TEST TYPES SUMMARY")
        LOG_UI.info("==================")
        for key in sorted(stats):
            LOG_UI.info("%s: %s", key.upper(), stats[key])

        if tag_stats:
            LOG_UI.info("")
            LOG_UI.info("TEST TAGS SUMMARY")
            LOG_UI.info("=================")
            for key in sorted(tag_stats):
                LOG_UI.info("%s: %s", key, tag_stats[key])
Exemple #49
0
    def run(self, config):
        def _get_name(test):
            return str(test["id"])

        def _get_name_no_id(test):
            return str(test["id"]).split("-", 1)[1]

        job1_dir, job1_id = self._setup_job(config.get("diff.jobids")[0])
        job2_dir, job2_id = self._setup_job(config.get("diff.jobids")[1])

        job1_data = self._get_job_data(job1_dir)
        job2_data = self._get_job_data(job2_dir)

        report_header = "Avocado Job Report\n"
        job1_results = [report_header]
        job2_results = [report_header]

        diff_filter = config.get("diff.filter")
        if "cmdline" in diff_filter:
            cmdline1 = self._get_command_line(job1_dir)
            cmdline2 = self._get_command_line(job2_dir)

            if str(cmdline1) != str(cmdline2):
                command_line_header = ["\n", "# COMMAND LINE\n"]
                job1_results.extend(command_line_header)
                job1_results.append(cmdline1)
                job2_results.extend(command_line_header)
                job2_results.append(cmdline2)

        if "time" in diff_filter:
            time1 = f"{job1_data['time']:.2f} s\n"
            time2 = f"{job2_data['time']:.2f} s\n"

            if str(time1) != str(time2):
                total_time_header = ["\n", "# TOTAL TIME\n"]
                job1_results.extend(total_time_header)
                job1_results.append(time1)
                job2_results.extend(total_time_header)
                job2_results.append(time2)

        if "variants" in diff_filter:
            variants1 = self._get_variants(job1_dir)
            variants2 = self._get_variants(job2_dir)

            if str(variants1) != str(variants2):
                variants_header = ["\n", "# VARIANTS\n"]
                job1_results.extend(variants_header)
                job1_results.extend(variants1)
                job2_results.extend(variants_header)
                job2_results.extend(variants2)

        if "results" in diff_filter:
            results1 = []
            if config.get("diff.strip_id"):
                get_name = _get_name_no_id
            else:
                get_name = _get_name
            for test in job1_data["tests"]:
                test_result = f"{get_name(test)}: {str(test['status'])}\n"
                results1.append(test_result)
            results2 = []
            for test in job2_data["tests"]:
                test_result = f"{get_name(test)}: {str(test['status'])}\n"
                results2.append(test_result)

            if str(results1) != str(results2):
                test_results_header = ["\n", "# TEST RESULTS\n"]
                job1_results.extend(test_results_header)
                job1_results.extend(results1)
                job2_results.extend(test_results_header)
                job2_results.extend(results2)

        if "config" in diff_filter:
            config1 = self._get_config(job1_dir)
            config2 = self._get_config(job2_dir)

            if str(config1) != str(config2):
                config_header = ["\n", "# SETTINGS\n"]
                job1_results.extend(config_header)
                job1_results.extend(config1)
                job2_results.extend(config_header)
                job2_results.extend(config2)

        if "sysinfo" in diff_filter:
            sysinfo_pre1 = self._get_sysinfo(job1_dir, "pre")
            sysinfo_pre2 = self._get_sysinfo(job2_dir, "pre")

            if str(sysinfo_pre1) != str(sysinfo_pre2):
                sysinfo_header_pre = ["\n", "# SYSINFO PRE\n"]
                job1_results.extend(sysinfo_header_pre)
                job1_results.extend(sysinfo_pre1)
                job2_results.extend(sysinfo_header_pre)
                job2_results.extend(sysinfo_pre2)

            sysinfo_post1 = self._get_sysinfo(job1_dir, "post")
            sysinfo_post2 = self._get_sysinfo(job2_dir, "post")

            if str(sysinfo_post1) != str(sysinfo_post2):
                sysinfo_header_post = ["\n", "# SYSINFO POST\n"]
                job1_results.extend(sysinfo_header_post)
                job1_results.extend(sysinfo_post1)
                job2_results.extend(sysinfo_header_post)
                job2_results.extend(sysinfo_post2)

        if config.get("diff.create_reports"):
            self.std_diff_output = False
            prefix = f"avocado_diff_{job1_id[:7]}_"
            tmp_file1 = tempfile.NamedTemporaryFile(mode="w",
                                                    prefix=prefix,
                                                    suffix=".txt",
                                                    delete=False)
            tmp_file1.writelines(job1_results)
            tmp_file1.close()

            prefix = f"avocado_diff_{job2_id[:7]}_"
            tmp_file2 = tempfile.NamedTemporaryFile(mode="w",
                                                    prefix=prefix,
                                                    suffix=".txt",
                                                    delete=False)
            tmp_file2.writelines(job2_results)
            tmp_file2.close()

            LOG_UI.info("%s %s", tmp_file1.name, tmp_file2.name)

        html_file = config.get("diff.html")
        open_browser = config.get("diff.open_browser")
        if open_browser and html_file is None:
            prefix = f"avocado_diff_{job1_id[:7]}_{job2_id[:7]}_"
            tmp_file = tempfile.NamedTemporaryFile(mode="w",
                                                   prefix=prefix,
                                                   suffix=".html",
                                                   delete=False)

            html_file = tmp_file.name

        if html_file is not None:
            self.std_diff_output = False
            try:
                html_diff = HtmlDiff()
                # pylint: disable=W0212
                html_diff._legend = """
                    <table class="diff" summary="Legends">
                    <tr> <td> <table border="" summary="Colors">
                    <tr><th> Colors </th> </tr>
                    <tr><td class="diff_add">&nbsp;Added&nbsp;</td></tr>
                    <tr><td class="diff_chg">Changed</td> </tr>
                    <tr><td class="diff_sub">Deleted</td> </tr>
                    </table></td>
                    <td> <table border="" summary="Links">
                    <tr><th colspan="2"> Links </th> </tr>
                    <tr><td>(f)irst change</td> </tr>
                    <tr><td>(n)ext change</td> </tr>
                    <tr><td>(t)op</td> </tr>
                    </table></td> </tr>
                    </table>"""

                job_diff_html = html_diff.make_file(
                    (_ for _ in job1_results),
                    (_ for _ in job2_results),
                    fromdesc=job1_id,
                    todesc=job2_id,
                )

                with open(html_file, "w", encoding="utf-8") as fp:
                    fp.writelines(job_diff_html)
                LOG_UI.info(html_file)

            except IOError as exception:
                LOG_UI.error(exception)
                sys.exit(exit_codes.AVOCADO_FAIL)

        if open_browser:
            setsid = getattr(os, "setsid", None)
            if not setsid:
                setsid = getattr(os, "setpgrp", None)
            with open(os.devnull, "r+", encoding="utf-8") as inout:
                cmd = ["xdg-open", html_file]
                subprocess.Popen(  # pylint: disable=W1509
                    cmd,
                    close_fds=True,
                    stdin=inout,
                    stdout=inout,
                    stderr=inout,
                    preexec_fn=setsid,
                )

        if self.std_diff_output:
            if self.term.enabled:
                for line in self._cdiff(
                        unified_diff(job1_results,
                                     job2_results,
                                     fromfile=job1_id,
                                     tofile=job2_id)):
                    LOG_UI.debug(line.strip())
            else:
                for line in unified_diff(job1_results,
                                         job2_results,
                                         fromfile=job1_id,
                                         tofile=job2_id):
                    LOG_UI.debug(line.strip())
Exemple #50
0
    def discover(self,
                 references,
                 which_tests=DiscoverMode.DEFAULT,
                 force=None):
        """
        Discover (possible) tests from test references.

        :param references: a list of tests references; if [] use plugin defaults
        :type references: builtin.list
        :param which_tests: Limit tests to be displayed
        :type which_tests: :class:`DiscoverMode`
        :param force: don't raise an exception when some test references
                      are not resolved to tests.
        :return: A list of test factories (tuples (TestClass, test_params))
        """
        def log_discovery_failure(plugin, details):
            # FIXME: Introduce avocado.exceptions logger and use here
            stacktrace.log_message(
                "Test discovery plugin %s failed: "
                "%s" % (plugin, details), LOG_UI.getChild("exceptions"))
            # FIXME: Introduce avocado.traceback logger and use here
            stacktrace.log_exc_info(sys.exc_info(), LOG_UI.getChild("debug"))

        discovered = []
        unresolved = []
        if references:
            # Resolve each reference against the plugins, in order; a
            # failing plugin is logged and skipped, never fatal.
            for reference in references:
                resolved = False
                for plugin in self._initialized_plugins:
                    try:
                        found = plugin.discover(reference, which_tests)
                        if found:
                            discovered.extend(found)
                            resolved = True
                            if which_tests != DiscoverMode.ALL:
                                # First plugin that resolves the reference
                                # wins, unless all tests were requested.
                                break
                    except Exception as details:  # pylint: disable=W0703
                        log_discovery_failure(plugin, details)
                if not resolved:
                    unresolved.append(reference)
        else:
            # No references given: let every plugin contribute its defaults.
            for plugin in self._initialized_plugins:
                try:
                    discovered.extend(plugin.discover(None, which_tests))
                except Exception as details:  # pylint: disable=W0703
                    log_discovery_failure(plugin, details)
        if unresolved:
            if which_tests == DiscoverMode.ALL:
                discovered.extend((MissingTest, {'name': reference})
                                  for reference in unresolved)
            else:
                error = LoaderUnhandledReferenceError(
                    unresolved, self._initialized_plugins)
                # This is a workaround to avoid changing the method signature
                if force is True or force == 'on':
                    LOG_UI.error(error)
                else:
                    raise error
        self._update_mappings()
        return discovered