Example #1
    def _checkConfigDump(self, config, dump):
        """Check if the parser output matches a config file dump"""
        configpath = os.path.join(testdatadir, config)
        dumppath = os.path.join(testdatadir, dump)

        if dumppath.endswith('.gz'):
            df = gzip.GzipFile(dumppath, 'r')
        else:
            df = open(dumppath, 'r')
        # we could have used pickle, but repr()-based dumps are easier to
        # generate, debug, and edit
        dumpdata = eval(df.read())

        p = multiplex_config.Parser(configpath)
        self._checkDictionaries(p, dumpdata)
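
The dump files read above are plain Python literals, which is why eval() suffices here: the dumps are trusted, checked-in test data. A minimal sketch of the round trip, assuming the dump holds a list of parameter dictionaries; the 'timeout' key is hypothetical, while 'shortname' appears in the examples below:

    # Writing and reading a repr()-based dump; 'timeout' is illustrative.
    entry = [{'shortname': 'sleeptest', 'timeout': 60}]
    with open('dump.txt', 'w') as df:
        df.write(repr(entry))
    with open('dump.txt') as df:
        dumpdata = eval(df.read())
    assert dumpdata == entry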
Example #2
    def multiplex(self, args):
        bcolors = output.colors
        pipe = output.get_paginator()
        multiplex_file = os.path.abspath(args.multiplex_file)

        if not os.path.isfile(multiplex_file):
            pipe.write(
                bcolors.fail_header_str('Invalid multiplex file %s' %
                                        multiplex_file))
            sys.exit(error_codes.numeric_status['AVOCADO_JOB_FAIL'])

        try:
            parser = multiplex_config.Parser(filename=multiplex_file)
        except multiplex_config.ParserError as details:
            fail_class = details.__class__.__name__
            pipe.write(
                bcolors.fail_header_str(
                    "Multiplex file '%s' has a syntax error\n" %
                    multiplex_file))
            pipe.write('%s: %s\n' % (fail_class, details))
            pipe.write(bcolors.fail_header_str('Aborting...'))
            sys.exit(error_codes.numeric_status['AVOCADO_JOB_FAIL'])
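
A minimal sketch of driving multiplex() from the command-line layer; the args object only needs the multiplex_file attribute used above, and the cmd object and file path are assumed:

    import argparse

    # Hypothetical wiring: `cmd` is assumed to be an object exposing the
    # multiplex() method above; the file path is illustrative.
    args = argparse.Namespace(multiplex_file='tests/sleeptest.mplx')
    # cmd.multiplex(args)  # exits with AVOCADO_JOB_FAIL on a bad file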
Example #3
    def _checkStringDump(self, string, dump, defaults=False):
        """Parse a config string and check it against a reference dump."""
        p = multiplex_config.Parser(defaults=defaults)
        p.parse_string(string)

        self._checkDictionaries(p, dump)
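
A minimal sketch of exercising the parser directly, as this helper does; the import path and the config syntax shown are assumptions, not taken from the source:

    from avocado.core import multiplex_config  # import path assumed

    # The config string below is illustrative, assumed syntax only.
    p = multiplex_config.Parser(defaults=False)
    p.parse_string("variants:\n    - example:\n")
    for d in p.get_dicts():
        print(d)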
Example #4
    def _checkStringConfig(self, string, reference):
        """Parse a config string and check it against a reference dict."""
        p = multiplex_config.Parser()
        p.parse_string(string)
        self._checkDictionaries(p, reference)
Example #5
    def _run(self, urls=None, multiplex_file=None):
        """
        Unhandled job method. Runs a list of test URLs to completion.

        :param urls: String or list of test URLs to run.
        :param multiplex_file: File that multiplexes a given test URL.

        :return: Integer with overall job status. See
                 :mod:`avocado.core.error_codes` for more information.
        :raise: Any exception (avocado crashed), or
                :class:`avocado.core.exceptions.JobBaseException` errors,
                that indicate a job failure.
        """
        params_list = []
        if urls is None:
            if self.args and self.args.url is not None:
                urls = self.args.url.split()
        else:
            if isinstance(urls, str):
                urls = urls.split()

        if urls is not None:
            for url in urls:
                params_list.append({'shortname': url})

        if multiplex_file is None:
            if self.args and self.args.multiplex_file is not None:
                multiplex_file = os.path.abspath(self.args.multiplex_file)
        else:
            multiplex_file = os.path.abspath(multiplex_file)

        if multiplex_file is not None:
            params_list = []
            if urls is not None:
                for url in urls:
                    parser = multiplex_config.Parser(multiplex_file)
                    parser.only_filter(url)
                    dcts = list(parser.get_dicts())
                    if dcts:
                        params_list.extend(dcts)
                    else:
                        params_list.append({'shortname': url})
            else:
                parser = multiplex_config.Parser(multiplex_file)
                for dct in parser.get_dicts():
                    params_list.append(dct)

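        # params_list now holds one dict per multiplex variant, or a
        # plain {'shortname': url} entry for URLs with no variants.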
        test_result = self._make_test_result(params_list)
        self.test_runner = self._make_test_runner(test_result)

        self.output_manager.start_file_logging(self.debuglog, self.loglevel)
        self.output_manager.debuglog = self.debuglog
        failures = self.test_runner.run(params_list)
        self.output_manager.stop_file_logging()

        # If it's all good so far, set job status to 'PASS'
        if self.status == 'RUNNING':
            self.status = 'PASS'
        # Let's clean up test artifacts
        if self.args is not None:
            if self.args.archive:
                archive.create_zip(self.debugdir, self.debugdir)
            if not self.args.keep_tmp_files:
                data_dir.clean_tmp_files()

        tests_status = not bool(failures)
        if tests_status:
            return error_codes.numeric_status['AVOCADO_ALL_OK']
        else:
            return error_codes.numeric_status['AVOCADO_TESTS_FAIL']
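
The variant-expansion step in _run() can be read in isolation: filter the parser down to one URL, collect its variant dictionaries, and fall back to a bare shortname entry when nothing matches. A minimal sketch, with an illustrative path and URL:

    # The path and URL are illustrative; multiplex_config, only_filter,
    # get_dicts, and the fallback shape all come from the code above.
    parser = multiplex_config.Parser('/path/to/tests.mplx')
    parser.only_filter('sleeptest')
    params_list = list(parser.get_dicts()) or [{'shortname': 'sleeptest'}]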