Example No. 1
def main():
    args = parse_args()
    suites = create_suites(args)
    # ========================================================================
    # Print features covered in this test
    # ========================================================================
    if args.features:
        features = []
        for suite in suites:
            for variants in suite.config['run.dict_variants']:
                features.append(variants['namespace'])

        unique_features = sorted(set(features))
        print('Features covered (%i):' % len(unique_features))
        print('\n'.join(unique_features))
        exit(0)

    # ========================================================================
    # Job execution
    # ========================================================================
    config = {'core.show': ['app'],
              'run.test_runner': 'nrunner'}
    with Job(config, suites) as j:
        exit_code = j.run()
    print_failed_tests(j.get_failed_tests())
    return exit_code
Example No. 2
def main():
    args = parse_args()
    suites = create_suites(args)
    # ========================================================================
    # Print features covered in this test
    # ========================================================================
    if args.features:
        features = []
        for suite in suites:
            for variants in suite.config['run.dict_variants']:
                if variants.get('namespace'):
                    features.append(variants['namespace'])

        unique_features = sorted(set(features))
        print('Features covered (%i):' % len(unique_features))
        print('\n'.join(unique_features))
        exit(0)

    # ========================================================================
    # Job execution
    # ========================================================================
    config = {'core.show': ['app'],
              'run.test_runner': 'nrunner'}

    # Workaround for travis problem on arm64 - https://github.com/avocado-framework/avocado/issues/4768
    if (platform.machine() == 'aarch64'):
        max_parallel = int(multiprocessing.cpu_count()/2)
        for suite in suites:
            if suite.name == 'check':
                suite.config['nrunner.max_parallel_tasks'] = max_parallel

    with Job(config, suites) as j:
        exit_code = j.run()
    print_failed_tests(j.get_failed_tests())
    return exit_code
Example No. 3
    def test_setup(self):
        mock_vm = _FakeVM()
        mock_vm.start = unittest.mock.Mock(return_value=True)
        mock_vm.create_snapshot = unittest.mock.Mock()
        mock_vm.stop = unittest.mock.Mock()
        mock_vm.restore_snapshot = unittest.mock.Mock()
        job_args = {'test_result_total': 1,
                    'vm_domain': 'domain',
                    'vm_username': '******',
                    'vm_hostname': 'hostname',
                    'vm_port': 22,
                    'vm_password': '******',
                    'vm_key_file': None,
                    'vm_cleanup': True,
                    'vm_no_copy': False,
                    'vm_timeout': 120,
                    'vm_hypervisor_uri': 'my_hypervisor_uri',
                    'reference': ['/tests/sleeptest.py',
                                  '/tests/other/test',
                                  'passtest.py'],
                    'dry_run': True,
                    'env_keep': None,
                    'keep_tmp': 'on',
                    'base_logdir': self.tmpdir.name}
        with Job(job_args) as job:
            with unittest.mock.patch('avocado_runner_vm.vm_connect',
                                     return_value=mock_vm):
                # VMTestRunner()
                runner = avocado_runner_vm.VMTestRunner(job, None)
                runner.setup()
                runner.tear_down()
                mock_vm.start.assert_called_once_with()
                mock_vm.create_snapshot.assert_called_once_with()
                mock_vm.stop.assert_called_once_with()
                mock_vm.restore_snapshot.assert_called_once_with()
Example No. 4
    def test_job_run_result_json_enabled(self):
        self.base_config['job.run.result.json.enabled'] = 'on'
        with Job(self.base_config) as j:
            result = j.run()
        self.assertEqual(result, exit_codes.AVOCADO_ALL_OK)
        json_results_path = os.path.join(self.tmpdir.name, 'latest', 'results.json')
        self.assertTrue(os.path.exists(json_results_path))
Example No. 5
    def test_job_run_result_json_output(self):
        json_results_path = os.path.join(self.tmpdir.name, 'myresults.json')
        self.base_config['job.run.result.json.output'] = json_results_path
        with Job(self.base_config) as j:
            result = j.run()
        self.assertEqual(result, exit_codes.AVOCADO_ALL_OK)
        self.assertTrue(os.path.exists(json_results_path))
Example No. 6
    def run_job(self):
        """Run a Job"""
        config = self.create_config()

        suite = TestSuite.from_config(config, '')

        # run the job
        with Job(config, [suite]) as j:
            result = j.run()

        return result
Example No. 7
def main(args):  # pylint: disable=W0621

    # ========================================================================
    # Print features covered in this test
    # ========================================================================
    if args.list_features:
        suites = create_suite_job_api(args)
        suites += create_suites(args)
        features = []
        for suite in suites:
            for variants in suite.config['run.dict_variants']:
                if variants.get('namespace'):
                    features.append(variants['namespace'])

        unique_features = sorted(set(features))
        print('Features covered (%i):' % len(unique_features))
        print('\n'.join(unique_features))
        exit(0)

    if not any([
            args.static_checks, args.job_api, args.nrunner_interface,
            args.unit, args.jobs, args.functional, args.optional_plugins,
            args.list_features
    ]):
        print("No tests were selected to run, running all of them.")
        enable_all_tests(args)

    suites = []
    if args.job_api:
        suites += create_suite_job_api(args)
    suites += create_suites(args)

    # ========================================================================
    # Job execution
    # ========================================================================
    config = {
        'core.show': ['app'],
        'run.job_category': 'avocado-selftests',
        'job.output.testlogs.statuses': ['FAIL', 'ERROR', 'INTERRUPT'],
        'job.output.testlogs.logfiles': ['debug.log']
    }

    # Workaround for travis problem on arm64 - https://github.com/avocado-framework/avocado/issues/4768
    if (platform.machine() == 'aarch64'):
        max_parallel = int(multiprocessing.cpu_count() / 2)
        for suite in suites:
            if suite.name == 'check':
                suite.config['nrunner.max_parallel_tasks'] = max_parallel

    with Job(config, suites) as j:
        exit_code = j.run()
    print_failed_tests(j.get_failed_tests())
    return exit_code
Example No. 8
    def test_check_directory_exists(self):
        """Test to check if a directory was created."""
        config = self.create_config()

        suite = TestSuite.from_config(config)

        # run the job
        with Job(config, [suite]) as j:
            result = j.run()

        # Asserts
        self.check_exit_code(result)
        self.check_directory_exists()
Example No. 9
    def test_check_output_file(self):
        """Test to check if the file passed as parameter was created."""
        config = self.create_config(self.workdir_file_path)

        suite = TestSuite.from_config(config)

        # run the job
        with Job(config, [suite]) as j:
            result = j.run()

        # Asserts
        self.check_exit_code(result)
        self.check_file_exists(self.workdir_file_path)
Example No. 10
    def test_check_archive_file_exists(self):
        """Test to check the archive file was created."""
        config = self.create_config()

        suite = TestSuite.from_config(config)

        # run the job
        with Job(config, [suite]) as j:
            result = j.run()
            logdir = j.logdir

        # Asserts
        self.check_exit_code(result)
        archive_path = '%s.zip' % logdir
        self.check_file_exists(archive_path)
Example No. 11
    def test_check_category_directory_exists(self):
        """Test to check if the category directory was created."""
        config = self.create_config()

        suite = TestSuite.from_config(config)

        # run the job
        with Job(config, [suite]) as j:
            result = j.run()
            logdir = j.logdir

        # Asserts
        self.check_exit_code(result)

        value = self.params.get('value')
        category_path = os.path.join(os.path.dirname(logdir), value)
        self.check_directory_exists(category_path)
Example No. 12
def main():

    epilog = """Examples:

    $ avocado-external-runner curl redhat.com
    $ avocado-external-runner curl "redhat.com -v" google.com

 Note: If you have multiple arguments, please use quotes as in
 the example above.
 """

    parser = argparse.ArgumentParser(
        description="Run commands through an external runner as Avocado tests.",
        formatter_class=argparse.RawTextHelpFormatter,
        epilog=epilog,
    )
    parser.add_argument(
        "runner",
        metavar="RUNNER",
        type=str,
        help="The external runner to process the arguments.",
    )
    parser.add_argument(
        "args",
        metavar="ARGS",
        type=str,
        nargs="+",
        help=("Arguments to be executed. If you have multiple "
              "arguments, please quote them."),
    )

    args = parser.parse_args()
    tests = []
    for arg in args.args:
        runnable = Runnable.from_args({
            "kind": "exec-test",
            "uri": find_command(args.runner),
            "arg": arg.split()
        })
        tests.append(runnable)

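    # Name each generated test after the resolved runner path and its first
    # argument (the "{uri}-{args[0]}" identifier format below).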
    config = {"runner.identifier_format": "{uri}-{args[0]}"}
    suite = TestSuite(tests=tests, name="external-runner", config=config)
    with Job({}, [suite]) as j:
        sys.exit(j.run())
Example No. 13
    def test_setup(self):
        mock_vm = _FakeVM()
        mock_vm.start = mock.Mock(return_value=True)
        mock_vm.create_snapshot = mock.Mock()
        mock_vm.stop = mock.Mock()
        mock_vm.restore_snapshot = mock.Mock()
        job_args = argparse.Namespace(test_result_total=1,
                                      vm_domain='domain',
                                      vm_username='******',
                                      vm_hostname='hostname',
                                      vm_port=22,
                                      vm_password='******',
                                      vm_key_file=None,
                                      vm_cleanup=True,
                                      vm_no_copy=False,
                                      vm_timeout=120,
                                      vm_hypervisor_uri='my_hypervisor_uri',
                                      reference=[
                                          '/tests/sleeptest.py',
                                          '/tests/other/test', 'passtest.py'
                                      ],
                                      dry_run=True,
                                      env_keep=None)
        job = None
        try:
            job = Job(job_args)
            job.setup()
            with mock.patch('avocado_runner_vm.vm_connect',
                            return_value=mock_vm):
                # VMTestRunner()
                runner = avocado_runner_vm.VMTestRunner(job, None)
                runner.setup()
                runner.tear_down()
                mock_vm.start.assert_called_once_with()
                mock_vm.create_snapshot.assert_called_once_with()
                mock_vm.stop.assert_called_once_with()
                mock_vm.restore_snapshot.assert_called_once_with()
        finally:
            if job:
                shutil.rmtree(job.args.base_logdir)
Example No. 14
def main():

    epilog = """Examples:

    $ avocado-external-runner curl redhat.com
    $ avocado-external-runner curl "redhat.com -v" google.com

 Note: If you have multiple arguments, please use quotes as in
 the example above.
 """

    parser = argparse.ArgumentParser(
        description='Run commands through an external runner as Avocado tests.',
        formatter_class=argparse.RawTextHelpFormatter,
        epilog=epilog)
    parser.add_argument('runner',
                        metavar='RUNNER',
                        type=str,
                        help='The external runner to process the arguments.')
    parser.add_argument('args',
                        metavar='ARGS',
                        type=str,
                        nargs='+',
                        help=('Arguments to be executed. If you have multiple '
                              'arguments, please quote them.'))

    args = parser.parse_args()
    tests = []
    for arg in args.args:
        runnable = Runnable.from_args({
            'kind': 'exec-test',
            'uri': find_command(args.runner),
            'arg': arg.split()
        })
        tests.append(runnable)

    config = {"runner.identifier_format": "{uri}-{args[0]}"}
    suite = TestSuite(tests=tests, name="external-runner", config=config)
    with Job({}, [suite]) as j:
        sys.exit(j.run())
Example No. 15
# command line helper, wrap command line ops to avocado friendly way
#
# Example: avocado-run-inplace.py dnf update -y
#

import sys

from avocado.core.job import Job
from avocado.core.nrunner import Runnable
from avocado.core.suite import TestSuite
from avocado.utils.path import find_command


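# Resolve the command name to a full path and turn the remaining words into
# positional arguments of an exec-test runnable.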
def create_runnable_from_command(command_parts):
    executable = find_command(command_parts[0])
    return Runnable('exec-test',
                    executable,
                    *command_parts[1:])


if __name__ == '__main__':
    command = sys.argv[1:]
    if not command:
        print("ERROR: no command given", file=sys.stderr)
        sys.exit(2)

    suite = TestSuite(tests=[create_runnable_from_command(command)],
                      name="")
    with Job({}, [suite]) as j:
        sys.exit(j.run())
Example No. 16
import sys

from avocado.core.job import Job
from avocado.core.suite import TestSuite

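# Two suites built purely from config: "orderly" runs its references one
# task at a time, while "random" shuffles the task order and runs up to
# three tasks in parallel.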
ORDERLY_CONFIG = {
    "resolver.references": ["/bin/true", "/bin/true", "/bin/last"],
    "nrunner.max_parallel_tasks": 1,
}

RANDOM_CONFIG = {
    "resolver.references": [
        "/bin/true",
        "/bin/true",
        "/bin/true",
        "/bin/true",
        "/bin/true",
        "/bin/last",
    ],
    "nrunner.shuffle": True,
    "nrunner.max_parallel_tasks": 3,
}

with Job(test_suites=[
        TestSuite.from_config(ORDERLY_CONFIG, name="orderly"),
        TestSuite.from_config(RANDOM_CONFIG, name="random"),
]) as j:
    sys.exit(j.run())
Example No. 17
def main(args):  # pylint: disable=W0621

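    # Every test group starts disabled; the --select/--skip handling below
    # decides which groups actually run.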
    args.dict_tests = {
        'static-checks': False,
        'job-api': False,
        'nrunner-interface': False,
        'unit': False,
        'jobs': False,
        'functional': False,
        'optional-plugins': False,
    }

    # Make a list of strings instead of a list with a single string
    if len(args.disable_plugin_checks) > 0:
        args.disable_plugin_checks = args.disable_plugin_checks[0].split(",")
    if len(args.select) > 0:
        args.select = args.select[0].split(",")
    if len(args.skip) > 0:
        args.skip = args.skip[0].split(",")

    # Print features covered in this test
    if args.list_features:
        suites = create_suite_job_api(args)
        suites += create_suites(args)
        features = []
        for suite in suites:
            for variants in suite.config['run.dict_variants']:
                if variants.get('namespace'):
                    features.append(variants['namespace'])

        unique_features = sorted(set(features))
        print('Features covered (%i):' % len(unique_features))
        print('\n'.join(unique_features))
        exit(0)

    # Will only run the tests you select; --select must be followed by a list of tests
    elif args.select:
        for elem in args.select:
            if elem not in args.dict_tests.keys():
                print(elem, "is not in the list of valid tests.")
                exit(0)
            else:
                args.dict_tests[elem] = True

    # Will run all the tests except those you skip; --skip must be followed by a list of tests
    elif args.skip:
        # Make all the values True, so that later we can set the tests we don't want to run to False
        args.dict_tests = {x: True for x in args.dict_tests}

        for elem in args.skip:
            if elem not in args.dict_tests.keys():
                print(elem, "is not in the list of valid tests.")
                exit(0)
            else:
                args.dict_tests[elem] = False

    # If no option was selected, run all tests!
    elif not (args.skip or args.select):
        print("No tests were selected to run, running all of them.")
        args.dict_tests = {x: True for x in args.dict_tests}

    else:
        print("Something went wrong, please report a bug!")
        exit(1)

    suites = []
    if args.dict_tests['job-api']:
        suites += create_suite_job_api(args)
    suites += create_suites(args)

    # ========================================================================
    # Job execution
    # ========================================================================
    config = {
        'run.job_category': 'avocado-selftests',
        'job.output.testlogs.statuses': ['FAIL', 'ERROR', 'INTERRUPT']
    }

    # Workaround for travis problem on arm64 - https://github.com/avocado-framework/avocado/issues/4768
    if (platform.machine() == 'aarch64'):
        max_parallel = int(multiprocessing.cpu_count() / 2)
        for suite in suites:
            if suite.name == 'functional-parallel':
                suite.config['nrunner.max_parallel_tasks'] = max_parallel

    with Job(config, suites) as j:
        exit_code = j.run()
    print_failed_tests(j.get_failed_tests())
    return exit_code
Example No. 18
import sys

from avocado.core.job import Job
from avocado.core.nrunner import Runnable
from avocado.core.suite import TestSuite

config = {'run.test_runner': 'nrunner'}

# Custom method (no discovery, no guess, no magic)
# Since there is no magic, we need to pass a suite name, otherwise a uuid4 will
# be used for suite.name. Also, run.references will be ignored (Avocado will
# not create test suites for you).

suite1 = TestSuite(config=config,
                   tests=[Runnable("noop", "noop")], name='suite1')
suite2 = TestSuite(config=config,
                   tests=[Runnable("noop", "noop")], name='suite2')

with Job(config, [suite1, suite2]) as j:
    sys.exit(j.run())
Example No. 19
    def setUp(self):
        prefix = temp_dir_prefix(__name__, self, 'setUp')
        self.tmpdir = tempfile.TemporaryDirectory(prefix=prefix)
        args = argparse.Namespace(base_logdir=self.tmpdir.name)
        self.job = Job(args)
        self.result = Result(self.job)
Example No. 20
    def test_run_suite(self):
        """
        Test RemoteTestRunner.run_suite()

        The general idea of this test is to:

        1) Create the machinery necessary to get a RemoteTestRunner
           setup inside a job, or looking at it the other way around, to
           have a runner that is created with a valid job.

        2) Mock the interactions with a remote host.  This is done here
           basically by mocking 'Remote' and 'fabric' usage.

        3) Provide a polluted JSON to be parsed by the RemoteTestRunner

        4) Assert that those results are properly parsed into the
           job's result
        """
        job_args = argparse.Namespace(test_result_total=1,
                                      remote_username='******',
                                      remote_hostname='hostname',
                                      remote_port=22,
                                      remote_password='******',
                                      remote_key_file=None,
                                      remote_timeout=60,
                                      show_job_log=False,
                                      mux_yaml=[
                                          '~/avocado/tests/foo.yaml',
                                          '~/avocado/tests/bar/baz.yaml'
                                      ],
                                      dry_run=True,
                                      env_keep=None,
                                      reference=[
                                          '/tests/sleeptest.py',
                                          '/tests/other/test', 'passtest.py'
                                      ])

        job = None
        try:
            job = Job(job_args)
            job.setup()
            runner = avocado_runner_remote.RemoteTestRunner(job, job.result)
            return_value = (True, (version.MAJOR, version.MINOR))
            runner.check_remote_avocado = mock.Mock(return_value=return_value)

            # These are mocked at their source, and will prevent fabric from
            # trying to contact remote hosts
            with mock.patch('avocado_runner_remote.Remote'):
                runner.remote = avocado_runner_remote.Remote(
                    job_args.remote_hostname)

                # This is the result that the run_suite() will get from remote.run
                remote_run_result = process.CmdResult()
                remote_run_result.stdout = JSON_RESULTS
                remote_run_result.exit_status = 0
                runner.remote.run = mock.Mock(return_value=remote_run_result)

                # We have to fake the uncompressing and removal of the zip
                # archive that was never generated on the "remote" end.
                # This test could be expanded by mocking the creation of an
                # actual zip file instead, but that's really overkill.
                with mock.patch('avocado_runner_remote.archive.uncompress'):
                    with mock.patch('avocado_runner_remote.os.remove'):
                        runner.run_suite(None, None, 61)

            # The job was created with dry_run so it should have a zeroed id
            self.assertEqual(job.result.job_unique_id, '0' * 40)
            self.assertEqual(job.result.tests_run, 1)
            self.assertEqual(job.result.passed, 1)
            cmd_line = ('avocado run --force-job-id '
                        '0000000000000000000000000000000000000000 --json - '
                        '--archive /tests/sleeptest.py /tests/other/test '
                        'passtest.py -m ~/avocado/tests/foo.yaml '
                        '~/avocado/tests/bar/baz.yaml --dry-run')
            runner.remote.run.assert_called_with(cmd_line,
                                                 ignore_status=True,
                                                 timeout=61)
        finally:
            if job:
                shutil.rmtree(job.args.base_logdir)
Example No. 21
    def test_run_suite(self):
        """
        Test RemoteTestRunner.run_suite()

        The general idea of this test is to:

        1) Create the machinery necessary to get a RemoteTestRunner
           setup inside a job, or looking at it the other way around, to
           have a runner that is created with a valid job.

        2) Mock the interactions with a remote host.  This is done here
           basically by mocking 'Remote' and 'fabric' usage.

        3) Provide a polluted JSON to be parsed by the RemoteTestRunner

        4) Assert that those results are properly parsed into the
           job's result
        """
        job_args = {'test_result_total': 1,
                    'remote_username': '******',
                    'remote_hostname': 'hostname',
                    'remote_port': 22,
                    'remote_password': '******',
                    'remote_key_file': None,
                    'remote_timeout': 60,
                    'mux_yaml': ['~/avocado/tests/foo.yaml',
                                 '~/avocado/tests/bar/baz.yaml'],
                    'filter_by_tags': ["-foo", "-bar"],
                    'filter_by_tags_include_empty': False,
                    'env_keep': None,
                    'base_logdir': self.tmpdir.name,
                    'run.keep_tmp': 'on',
                    'run.store_logging_stream': [],
                    'run.dry_run.enabled': True,
                    'run.references': ['/tests/sleeptest.py',
                                       '/tests/other/test',
                                       'passtest.py'],
                    }

        with Job(job_args) as job:
            runner = avocado_runner_remote.RemoteTestRunner()
            return_value = (True, (version.MAJOR, version.MINOR))
            runner.check_remote_avocado = unittest.mock.Mock(return_value=return_value)

            # These are mocked at their source, and will prevent fabric from
            # trying to contact remote hosts
            with unittest.mock.patch('avocado_runner_remote.Remote'):
                remote_hostname = job_args.get('remote_hostname')
                runner.remote = avocado_runner_remote.Remote(remote_hostname)

                # This is the result that the run_suite() will get from remote.run
                remote_run_result = process.CmdResult()
                remote_run_result.stdout = JSON_RESULTS
                remote_run_result.exit_status = 0
                runner.remote.run = unittest.mock.Mock(return_value=remote_run_result)

                # We have to fake the uncompressing and removal of the zip
                # archive that was never generated on the "remote" end.
                # This test could be expanded by mocking the creation of an
                # actual zip file instead, but that's really overkill.
                with unittest.mock.patch('avocado_runner_remote.archive.uncompress'):
                    with unittest.mock.patch('avocado_runner_remote.os.remove'):
                        runner.run_suite(job, job.result, None, None, 61)

        # The job was created with dry_run so it should have a zeroed id
        self.assertEqual(job.result.job_unique_id, '0' * 40)
        self.assertEqual(job.result.tests_run, 1)
        self.assertEqual(job.result.passed, 1)
        cmd_line = ('avocado run --force-job-id '
                    '0000000000000000000000000000000000000000 --json - '
                    '--archive /tests/sleeptest.py /tests/other/test '
                    'passtest.py --mux-yaml ~/avocado/tests/foo.yaml '
                    '~/avocado/tests/bar/baz.yaml --dry-run --filter-'
                    'by-tags -foo --filter-by-tags -bar')
        runner.remote.run.assert_called_with(cmd_line,
                                             ignore_status=True,
                                             timeout=61)
Example No. 22
#!/usr/bin/env python3

import sys

from avocado.core.job import Job

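# Resolve the single reference below, enable the HTML result plugin and have
# Avocado open the generated report in a browser when the job finishes.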
config = {
    'run.references': ['examples/tests/passtest.py:PassTest.test'],
    'job.run.result.html.enabled': 'on',
    'run.open_browser': True
}

with Job(config) as j:
    sys.exit(j.run())
Example No. 23
#!/usr/bin/env python3

import sys

from avocado.core.job import Job
from avocado.core.suite import TestSuite

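# Each dict in 'run.dict_variants' becomes one variant of the resolved test,
# so sleeptest should run twice: with sleep_length=0.5 and with 1.0.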
config = {
    'resolver.references': ['examples/tests/sleeptest.py:SleepTest.test'],
    'run.dict_variants': [{
        'sleep_length': "0.5"
    }, {
        'sleep_length': "1.0"
    }]
}

suite = TestSuite.from_config(config)
with Job(config, [suite]) as j:
    sys.exit(j.run())
Example No. 24
#!/usr/bin/env python3

import sys

from avocado.core.job import Job
from avocado.core.suite import TestSuite

ORDERLY_CONFIG = {
    'resolver.references': ['/bin/true', '/bin/true', '/bin/last'],
    'nrunner.max_parallel_tasks': 1
}

RANDOM_CONFIG = {
    'resolver.references': [
        '/bin/true', '/bin/true', '/bin/true', '/bin/true', '/bin/true',
        '/bin/last'
    ],
    'nrunner.shuffle': True,
    'nrunner.max_parallel_tasks': 3
}

with Job(test_suites=[
        TestSuite.from_config(ORDERLY_CONFIG, name='orderly'),
        TestSuite.from_config(RANDOM_CONFIG, name='random')
]) as j:
    sys.exit(j.run())
Example No. 25
#!/usr/bin/env python3

import sys

from avocado.core.job import Job
from avocado.core.nrunner.runnable import Runnable
from avocado.core.suite import TestSuite

# Custom method (no discovery, no guess, no magic)
# Since there is no magic, we need to pass a suite name, otherwise a uuid4 will
# be used for suite.name. Also, resolver.references will be ignored (Avocado
# will not create test suites for you).

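# suite3 is created with enabled=False, so only suite1 and suite2 should
# actually be executed by the job.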
suite1 = TestSuite(name="suite1", tests=[Runnable("noop", "noop")])
suite2 = TestSuite(name="suite2", tests=[Runnable("noop", "noop")])
suite3 = TestSuite(name="suite3",
                   enabled=False,
                   tests=[Runnable("noop", "noop")])

with Job(test_suites=[suite1, suite2, suite3]) as j:
    sys.exit(j.run())
Example No. 26
#!/usr/bin/env python3

import os
import sys

from avocado.core.job import Job


ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

CONFIG = {
    'test_runner': 'nrunner',
    'nrun.references': [os.path.join(ROOT_DIR, 'selftests', 'unit'),
                        os.path.join(ROOT_DIR, 'selftests', 'functional')],
    'filter_by_tags': ['parallel:1'],
    # These are not currently supported by plugins/runner_nrunner.py, but it's
    # better to be prepared
    'nrun.parallel_tasks': 1,
    'nrun.disable_task_randomization': True,
    }


if __name__ == '__main__':
    with Job(CONFIG) as j:
        os.environ['AVOCADO_CHECK_LEVEL'] = '3'
        sys.exit(j.run())
Example No. 27
    def setUp(self):
        prefix = temp_dir_prefix(__name__, self, 'setUp')
        self.tmpdir = tempfile.mkdtemp(prefix=prefix)
        args = argparse.Namespace(base_logdir=self.tmpdir)
        self.job = Job(args)
        self.result = Result(self.job)
Example No. 28
    def setUp(self):
        self.tmpdir = tempfile.mkdtemp(prefix="avocado_" + __name__)
        args = argparse.Namespace(base_logdir=self.tmpdir)
        self.job = Job(args)
        self.result = Result(self.job)
Example No. 29
#!/usr/bin/env python3

import sys

from avocado.core.job import Job
from avocado.core.nrunner import Runnable
from avocado.core.suite import TestSuite

# an exec-test runnable consists of a runnable type (exec-test),
# a URI (examples/tests/sleeptest.sh), followed by zero to n arguments,
# ending with zero to m keyword arguments.
#
# During the execution, arguments are appended to the URI and keyword
# arguments are converted to environment variables.

# here, SLEEP_LENGTH becomes an environment variable
sleeptest = Runnable('exec-test',
                     'examples/tests/sleeptest.sh',
                     SLEEP_LENGTH='2')
# here, 'Hello World!' is appended to the URI (/usr/bin/echo)
echo = Runnable('exec-test', '/usr/bin/echo', 'Hello World!')

# the execution of examples/tests/sleeptest.sh takes around 2 seconds
# and the output of the /usr/bin/echo test is available at the
# job-results/latest/test-results/exec-test-2-_usr_bin_echo/stdout file.
suite = TestSuite(name="exec-test", tests=[sleeptest, echo])

with Job(test_suites=[suite]) as j:
    sys.exit(j.run())
Example No. 30
    def setUp(self):
        prefix = temp_dir_prefix(__name__, self, 'setUp')
        self.tmpdir = tempfile.TemporaryDirectory(prefix=prefix)
        args = {'base_logdir': self.tmpdir.name}
        self.job = Job(args)
        self.result = Result(self.job)