Example #1
    def create_test_suite(references):
        """
        Creates the test suite for this Job

        This is a public Job API as part of the documented Job phases

        NOTE: This is similar to avocado.core.Job.create_test_suite
        """
        try:
            suite = loader.loader.discover(references)
        except loader.LoaderError as details:
            stacktrace.log_exc_info(sys.exc_info(), LOG_UI.getChild("debug"))
            raise exceptions.OptionValidationError(details)

        if not suite:
            if references:
                references = " ".join(references)
                e_msg = ("No tests found for given test references, try "
                         "'avocado list -V %s' for details" % references)
            else:
                e_msg = ("No test references provided nor any other arguments "
                         "resolved into tests. Please double check the "
                         "executed command.")
            raise exceptions.OptionValidationError(e_msg)

        return suite
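
A minimal caller sketch for the API above, assuming `avocado.core.exceptions` and a `job` object exposing `create_test_suite`; the `run_discovery` helper is hypothetical and only illustrates how `OptionValidationError` separates bad user input from an actual crash:

from avocado.core import exceptions

def run_discovery(job, references):
    """Discover a test suite, treating validation errors as bad user input."""
    try:
        return job.create_test_suite(references)
    except exceptions.OptionValidationError as details:
        # OptionValidationError means the references were invalid, not that
        # avocado itself crashed, so report the problem and bail out cleanly.
        print("Cannot create test suite: %s" % details)
        return None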
Example #2
    def _multiplex_params_list(self, params_list, multiplex_files):
        """Expand each entry in params_list into one entry per multiplex variant."""
        for mux_file in multiplex_files:
            if not os.path.exists(mux_file):
                e_msg = "Multiplex file %s doesn't exist." % mux_file
                raise exceptions.OptionValidationError(e_msg)
        result = []
        for params in params_list:
            try:
                variants = multiplexer.multiplex_yamls(multiplex_files,
                                                       self.args.filter_only,
                                                       self.args.filter_out)
            except SyntaxError:
                # A malformed multiplex file yields no variants; the params
                # dict is then passed through unexpanded below.
                variants = None
            if variants:
                tag = 1
                for variant in variants:
                    env = {}
                    for t in variant:
                        env.update(dict(t.environment))
                    env.update({'tag': tag})
                    env.update({'id': params['id']})
                    result.append(env)
                    tag += 1
            else:
                result.append(params)
        return result
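
To make the merge-and-tag loop above concrete, here is a self-contained sketch with invented variant data; `Node` is a stand-in for a real multiplexer tree node, whose only relevant attribute here is `environment`:

from collections import namedtuple

Node = namedtuple('Node', 'environment')  # minimal stand-in for a tree node

params = {'id': 'sleeptest'}  # invented params dict, as produced by discovery
variants = [[Node({'sleep_length': 1})],
            [Node({'sleep_length': 10})]]

result = []
tag = 1
for variant in variants:
    env = {}
    for t in variant:
        env.update(dict(t.environment))   # merge each node's environment
    env.update({'tag': tag, 'id': params['id']})
    result.append(env)
    tag += 1

print(result)
# [{'sleep_length': 1, 'tag': 1, 'id': 'sleeptest'},
#  {'sleep_length': 10, 'tag': 2, 'id': 'sleeptest'}]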
Example #3
    def _run(self, urls=None):
        """
        Unhandled job method. Runs a list of test URLs to its completion.

        :param urls: String with tests to run, separated by whitespace.
                     Optionally, a list of tests (each test a string).
        :return: Integer with overall job status. See
                 :mod:`avocado.core.exit_codes` for more information.
        :raise: Any exception (avocado crashed), or
                :class:`avocado.core.exceptions.JobBaseException` errors
                that signal a job failure.
        """
        self._setup_job_results()

        test_suite = self._make_test_suite(urls)
        self._validate_test_suite(test_suite)
        test_suite = self._filter_test_suite(test_suite)
        if not test_suite:
            e_msg = ("No tests found within the specified path(s) "
                     "(Possible reasons: File ownership, permissions, "
                     "filters, typos)")
            raise exceptions.OptionValidationError(e_msg)

        mux = multiplexer.Mux(self.args)
        self.args.test_result_total = mux.get_number_of_tests(test_suite)

        self._make_test_result()
        self._make_test_runner()
        self._start_sysinfo()

        self.view.start_file_logging(self.logfile,
                                     self.loglevel,
                                     self.unique_id)
        _TEST_LOGGER.info('Job ID: %s', self.unique_id)
        _TEST_LOGGER.info('')

        self.view.logfile = self.logfile
        failures = self.test_runner.run_suite(test_suite, mux,
                                              timeout=self.timeout)
        self.view.stop_file_logging()
        if not self.standalone:
            self._update_latest_link()
        # If it's all good so far, set job status to 'PASS'
        if self.status == 'RUNNING':
            self.status = 'PASS'
        # Let's clean up test artifacts
        if getattr(self.args, 'archive', False):
            filename = self.logdir + '.zip'
            archive.create(filename, self.logdir)
        if not settings.get_value('runner.behavior', 'keep_tmp_files',
                                  key_type=bool, default=False):
            data_dir.clean_tmp_files()
        _TEST_LOGGER.info('Test results available in %s', self.logdir)

        tests_status = not bool(failures)
        if tests_status:
            return exit_codes.AVOCADO_ALL_OK
        else:
            return exit_codes.AVOCADO_TESTS_FAIL
Example #4
    def _handle_urls(self, urls):
        if urls is None:
            urls = getattr(self.args, 'url', None)

        if isinstance(urls, str):
            urls = urls.split()

        if not urls:
            e_msg = "Empty test ID. A test path or alias must be provided"
            raise exceptions.OptionValidationError(e_msg)

        return urls
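
A standalone rendition of the normalization above, useful for seeing the three accepted input shapes; `args` is a hypothetical namespace standing in for parsed command-line options, and `ValueError` replaces `OptionValidationError` to keep the sketch dependency-free:

import argparse

def handle_urls(args, urls=None):
    """Normalize None / string / list inputs into a non-empty list of URLs."""
    if urls is None:
        urls = getattr(args, 'url', None)
    if isinstance(urls, str):
        urls = urls.split()
    if not urls:
        raise ValueError("Empty test ID. A test path or alias must be provided")
    return urls

args = argparse.Namespace(url='sleeptest failtest')
print(handle_urls(args))               # ['sleeptest', 'failtest']
print(handle_urls(args, 'synctest'))   # ['synctest']
print(handle_urls(args, ['a', 'b']))   # ['a', 'b']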
Example #5
    def _validate_test_suite(self, test_suite):
        try:
            # Do not attempt to validate the tests given on the command line if
            # the tests will not be copied from this system to a remote one
            # using the remote plugin features
            if not getattr(self.args, 'remote_no_copy', False):
                error_msg_parts = self.test_loader.validate_ui(test_suite)
            else:
                error_msg_parts = []
        except KeyboardInterrupt:
            raise exceptions.JobError('Command interrupted by user...')

        if error_msg_parts:
            self._remove_job_results()
            e_msg = '\n'.join(error_msg_parts)
            raise exceptions.OptionValidationError(e_msg)
Example #6
    def _run(self, urls=None, multiplex_files=None):
        """
        Unhandled job method. Runs a list of test URLs to its completion.

        :param urls: String with tests to run, separated by whitespace.
                     Optionally, a list of tests (each test a string).
        :param multiplex_files: Files used to multiplex the given test URLs.

        :return: Integer with overall job status. See
                 :mod:`avocado.core.exit_codes` for more information.
        :raise: Any exception (avocado crashed), or
                :class:`avocado.core.exceptions.JobBaseException` errors
                that signal a job failure.
        """
        if urls is None:
            if self.args and self.args.url is not None:
                urls = self.args.url

        if isinstance(urls, str):
            urls = urls.split()

        if not urls:
            e_msg = "Empty test ID. A test path or alias must be provided"
            raise exceptions.OptionValidationError(e_msg)

        self._make_test_loader()

        params_list = self.test_loader.discover_urls(urls)

        if multiplexer.MULTIPLEX_CAPABLE:
            if multiplex_files is None:
                if self.args and self.args.multiplex_files is not None:
                    multiplex_files = self.args.multiplex_files

            if multiplex_files is not None:
                params_list = self._multiplex_params_list(
                    params_list, multiplex_files)

        self._setup_job_results()

        try:
            test_suite = self.test_loader.discover(params_list)
            error_msg_parts = self.test_loader.validate_ui(test_suite)
        except KeyboardInterrupt:
            raise exceptions.JobError('Command interrupted by user...')

        if error_msg_parts:
            self._remove_job_results()
            e_msg = '\n'.join(error_msg_parts)
            raise exceptions.OptionValidationError(e_msg)

        if not test_suite:
            e_msg = ("No tests found within the specified path(s) "
                     "(Possible reasons: File ownership, permissions, typos)")
            raise exceptions.OptionValidationError(e_msg)

        if self.args is not None:
            self.args.test_result_total = len(test_suite)

        self._make_test_result()
        self._make_test_runner()
        self._start_sysinfo()

        self.view.start_file_logging(self.logfile, self.loglevel,
                                     self.unique_id)
        self.view.logfile = self.logfile
        failures = self.test_runner.run_suite(test_suite)
        self.view.stop_file_logging()
        self._update_latest_link()
        # If it's all good so far, set job status to 'PASS'
        if self.status == 'RUNNING':
            self.status = 'PASS'
        # Let's clean up test artifacts
        if self.args is not None:
            if self.args.archive:
                filename = self.logdir + '.zip'
                archive.create(filename, self.logdir)
            if not self.args.keep_tmp_files:
                data_dir.clean_tmp_files()

        tests_status = not bool(failures)
        if tests_status:
            return exit_codes.AVOCADO_ALL_OK
        else:
            return exit_codes.AVOCADO_TESTS_FAIL
Example #7
File: job.py Project: ypu/avocado
    def _run(self, urls=None):
        """
        Unhandled job method. Runs a list of test URLs to its completion.

        :param urls: String with tests to run, separated by whitespace.
                     Optionally, a list of tests (each test a string).
        :return: Integer with overall job status. See
                 :mod:`avocado.core.exit_codes` for more information.
        :raise: Any exception (avocado crashed), or
                :class:`avocado.core.exceptions.JobBaseException` errors
                that signal a job failure.
        """
        if urls is None:
            urls = getattr(self.args, 'url', None)

        if isinstance(urls, str):
            urls = urls.split()

        if not urls:
            e_msg = "Empty test ID. A test path or alias must be provided"
            raise exceptions.OptionValidationError(e_msg)

        self._make_test_loader()

        params_list = self.test_loader.discover_urls(urls)

        mux = multiplexer.Mux(self.args)
        self._setup_job_results()

        try:
            test_suite = self.test_loader.discover(params_list)
            # Do not attempt to validate the tests given on the command line if
            # the tests will not be copied from this system to a remote one
            # using the remote plugin features
            if not getattr(self.args, 'remote_no_copy', False):
                error_msg_parts = self.test_loader.validate_ui(test_suite)
            else:
                error_msg_parts = []
        except KeyboardInterrupt:
            raise exceptions.JobError('Command interrupted by user...')

        if error_msg_parts:
            self._remove_job_results()
            e_msg = '\n'.join(error_msg_parts)
            raise exceptions.OptionValidationError(e_msg)

        if not test_suite:
            e_msg = ("No tests found within the specified path(s) "
                     "(Possible reasons: File ownership, permissions, typos)")
            raise exceptions.OptionValidationError(e_msg)

        self.args.test_result_total = mux.get_number_of_tests(test_suite)

        self._make_test_result()
        self._make_test_runner()
        self._start_sysinfo()

        self.view.start_file_logging(self.logfile, self.loglevel,
                                     self.unique_id)
        _TEST_LOGGER.info('Job ID: %s', self.unique_id)
        _TEST_LOGGER.info('')

        self.view.logfile = self.logfile
        failures = self.test_runner.run_suite(test_suite, mux)
        self.view.stop_file_logging()
        if not self.standalone:
            self._update_latest_link()
        # If it's all good so far, set job status to 'PASS'
        if self.status == 'RUNNING':
            self.status = 'PASS'
        # Let's clean up test artifacts
        if getattr(self.args, 'archive', False):
            filename = self.logdir + '.zip'
            archive.create(filename, self.logdir)
        if not settings.get_value(
                'runner.behavior', 'keep_tmp_files', key_type=bool,
                default=False):
            data_dir.clean_tmp_files()
        _TEST_LOGGER.info('Test results available in %s', self.logdir)

        tests_status = not bool(failures)
        if tests_status:
            return exit_codes.AVOCADO_ALL_OK
        else:
            return exit_codes.AVOCADO_TESTS_FAIL
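
Since every `_run` variant above returns an `exit_codes` constant, a caller typically forwards that value as the process exit status. A hedged sketch, assuming `avocado.core.exit_codes`, `avocado.core.exceptions`, and an already-configured `job` object; the `main` wrapper itself is hypothetical:

import sys

from avocado.core import exceptions, exit_codes

def main(job):
    """Run the job and exit with its resulting status code."""
    try:
        status = job._run()
    except exceptions.OptionValidationError as details:
        # Bad command-line input: report it and fail the job, not the tests.
        print(details)
        status = exit_codes.AVOCADO_JOB_FAIL  # assumed constant for job errors
    sys.exit(status)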