Example #1
    def setUp(self):
        self.loader = RegressionCheckLoader([
            'unittests/resources/checks_unlisted/dependencies/normal.py'
        ])

        # Set runtime prefix
        rt.runtime().resources.prefix = tempfile.mkdtemp(dir='unittests')
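These snippets are excerpted from ReFrame's unit-test suite, so their import headers are not shown. As a rough guide only, most of the setUp-style examples below assume something like the following imports (module paths follow the ReFrame code base but may differ between versions; treat this as an assumption, not the exact original header):

# Assumed import header for the snippets in this listing; exact module
# paths may vary across ReFrame versions.
import os
import tempfile
import unittest

import reframe.core.runtime as rt
from reframe.frontend.loader import RegressionCheckLoader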
Example #2
    def test_dependencies(self):
        self.loader = RegressionCheckLoader(
            ['unittests/resources/checks_unlisted/deps_complex.py'])

        # Setup the runner
        self.checks = self.loader.load_all()
        self.runall(self.checks, sort=True)

        self.assertRunall()
        stats = self.runner.stats
        assert stats.num_cases(0) == 10
        assert len(stats.failures()) == 4
        for tf in stats.failures():
            check = tf.testcase.check
            _, exc_value, _ = tf.exc_info
            if check.name == 'T7' or check.name == 'T9':
                assert isinstance(exc_value, TaskDependencyError)

        # Check that cleanup is executed properly for successful tests as well
        for t in stats.tasks():
            check = t.testcase.check
            if t.failed:
                continue

            if t.ref_count == 0:
                assert os.path.exists(os.path.join(check.outputdir, 'out.txt'))
Example #3
    def setUp(self):
        get_modules_system().searchpath_add(fixtures.TEST_MODULES)

        # Load a system configuration
        self.system, self.partition, self.progenv = fixtures.get_test_config()
        self.resourcesdir = tempfile.mkdtemp(dir='unittests')
        self.loader = RegressionCheckLoader(['unittests/resources'])
        self.resources = ResourcesManager(prefix=self.resourcesdir)
Example #4
class TestRegressionCheckLoader(unittest.TestCase):
    def setUp(self):
        self.loader = RegressionCheckLoader(['.'], ignore_conflicts=True)
        self.loader_with_path = RegressionCheckLoader(
            ['unittests/resources', 'unittests/foobar'], ignore_conflicts=True)
        self.loader_with_prefix = RegressionCheckLoader(
            load_path=['badchecks'],
            prefix=os.path.abspath('unittests/resources'))

        self.system = System('foo')
        self.resources = ResourcesManager()

    def test_load_file_relative(self):
        checks = self.loader.load_from_file(
            'unittests/resources/emptycheck.py',
            system=self.system,
            resources=self.resources)
        self.assertEqual(1, len(checks))
        self.assertEqual(checks[0].name, 'emptycheck')

    def test_load_file_absolute(self):
        checks = self.loader.load_from_file(
            os.path.abspath('unittests/resources/emptycheck.py'),
            system=self.system,
            resources=self.resources)
        self.assertEqual(1, len(checks))
        self.assertEqual(checks[0].name, 'emptycheck')

    def test_load_recursive(self):
        checks = self.loader.load_from_dir('unittests/resources',
                                           recurse=True,
                                           system=self.system,
                                           resources=self.resources)
        self.assertEqual(11, len(checks))

    def test_load_all(self):
        checks = self.loader_with_path.load_all(system=self.system,
                                                resources=self.resources)
        self.assertEqual(10, len(checks))

    def test_load_all_with_prefix(self):
        checks = self.loader_with_prefix.load_all(system=self.system,
                                                  resources=self.resources)
        self.assertEqual(1, len(checks))

    def test_conflicted_checks(self):
        self.loader_with_path._ignore_conflicts = False
        self.assertRaises(NameConflictError,
                          self.loader_with_path.load_all,
                          system=self.system,
                          resources=self.resources)

    def test_load_error(self):
        self.assertRaises(OSError, self.loader.load_from_file,
                          'unittests/resources/foo.py')
Example #5
    def test_sigterm(self):
        self.loader = RegressionCheckLoader(
            ['unittests/resources/checks_unlisted/selfkill.py']
        )
        checks = self.loader.load_all()
        with pytest.raises(ReframeForceExitError,
                           match='received TERM signal'):
            self.runall(checks)

        self.assert_all_dead()
        assert self.runner.stats.num_cases() == 1
        assert len(self.runner.stats.failures()) == 1
Example #6
class TestRegressionCheckLoader(unittest.TestCase):
    def setUp(self):
        self.loader = RegressionCheckLoader(['.'], ignore_conflicts=True)
        self.loader_with_path = RegressionCheckLoader(
            ['unittests/resources/checks', 'unittests/foobar'],
            ignore_conflicts=True)
        self.loader_with_prefix = RegressionCheckLoader(
            load_path=['bad'],
            prefix=os.path.abspath('unittests/resources/checks'))

    def test_load_file_relative(self):
        checks = self.loader.load_from_file(
            'unittests/resources/checks/emptycheck.py')
        self.assertEqual(1, len(checks))
        self.assertEqual(checks[0].name, 'EmptyTest')

    def test_load_file_absolute(self):
        checks = self.loader.load_from_file(
            os.path.abspath('unittests/resources/checks/emptycheck.py'))
        self.assertEqual(1, len(checks))
        self.assertEqual(checks[0].name, 'EmptyTest')

    def test_load_recursive(self):
        checks = self.loader.load_from_dir('unittests/resources/checks',
                                           recurse=True)
        self.assertEqual(12, len(checks))

    def test_load_all(self):
        checks = self.loader_with_path.load_all()
        self.assertEqual(11, len(checks))

    def test_load_all_with_prefix(self):
        checks = self.loader_with_prefix.load_all()
        self.assertEqual(1, len(checks))

    def test_load_new_syntax(self):
        checks = self.loader.load_from_file(
            'unittests/resources/checks_unlisted/good.py')
        self.assertEqual(13, len(checks))

    def test_conflicted_checks(self):
        self.loader_with_path._ignore_conflicts = False
        self.assertRaises(NameConflictError, self.loader_with_path.load_all)

    def test_load_error(self):
        self.assertRaises(OSError, self.loader.load_from_file,
                          'unittests/resources/checks/foo.py')

    def test_load_bad_required_version(self):
        with self.assertRaises(ValueError):
            self.loader.load_from_file('unittests/resources/checks_unlisted/'
                                       'no_required_version.py')

    def test_load_bad_init(self):
        tests = self.loader.load_from_file(
            'unittests/resources/checks_unlisted/bad_init_check.py')
        self.assertEqual(0, len(tests))
Example #7
class TestRegressionCheckLoader(unittest.TestCase):
    def setUp(self):
        warnings.simplefilter('ignore', ReframeDeprecationWarning)
        self.loader = RegressionCheckLoader(['.'])
        self.loader_with_path = RegressionCheckLoader(
            ['unittests/resources', 'unittests/foobar'])
        self.loader_with_prefix = RegressionCheckLoader(
            load_path=['badchecks'],
            prefix=os.path.abspath('unittests/resources'))

        self.system = System('foo')
        self.resources = ResourcesManager()

    def tearDown(self):
        warnings.simplefilter('default', ReframeDeprecationWarning)

    def test_load_file_relative(self):
        checks = self.loader.load_from_file(
            'unittests/resources/emptycheck.py',
            system=self.system,
            resources=self.resources)
        self.assertEqual(1, len(checks))
        self.assertEqual(checks[0].name, 'emptycheck')

    def test_load_file_absolute(self):
        checks = self.loader.load_from_file(
            os.path.abspath('unittests/resources/emptycheck.py'),
            system=self.system,
            resources=self.resources)
        self.assertEqual(1, len(checks))
        self.assertEqual(checks[0].name, 'emptycheck')

    def test_load_recursive(self):
        checks = self.loader.load_from_dir('unittests/resources',
                                           recurse=True,
                                           system=self.system,
                                           resources=self.resources)
        self.assertEqual(12, len(checks))

    def test_load_all(self):
        checks = self.loader_with_path.load_all(system=self.system,
                                                resources=self.resources)
        self.assertEqual(11, len(checks))

    def test_load_all_with_prefix(self):
        checks = self.loader_with_prefix.load_all(system=self.system,
                                                  resources=self.resources)
        self.assertEqual(1, len(checks))

    def test_load_error(self):
        self.assertRaises(ReframeError, self.loader.load_from_file,
                          'unittests/resources/foo.py')
Example #8
    def setUp(self):
        self.loader = RegressionCheckLoader(['unittests/resources/checks'],
                                            ignore_conflicts=True)

        # Setup the runner
        self.runner = executors.Runner(policies.SerialExecutionPolicy())
        self.checks = self.loader.load_all()

        # Set runtime prefix
        rt.runtime().resources.prefix = tempfile.mkdtemp(dir='unittests')

        # Reset current_run
        rt.runtime()._current_run = 0
Example #9
def test_ci_gitlab_pipeline():
    loader = RegressionCheckLoader(
        ['unittests/resources/checks_unlisted/deps_complex.py'])
    cases = dependencies.toposort(
        dependencies.build_deps(executors.generate_testcases(
            loader.load_all()))[0])
    with io.StringIO() as fp:
        ci.emit_pipeline(fp, cases)
        pipeline = fp.getvalue()

    # Fetch the latest Gitlab CI JSON schema
    response = requests.get('https://json.schemastore.org/gitlab-ci')
    assert response.ok

    schema = response.json()
    jsonschema.validate(yaml.safe_load(pipeline), schema)
Example #10
    def setUp(self):
        # Load a system configuration
        settings = config.load_from_file("reframe/settings.py")
        self.site_config = config.SiteConfiguration()
        self.site_config.load_from_dict(settings.site_configuration)
        self.system = self.site_config.systems['generic']
        self.resourcesdir = tempfile.mkdtemp(dir='unittests')
        self.resources = ResourcesManager(prefix=self.resourcesdir)
        self.loader = RegressionCheckLoader(['unittests/resources'],
                                            ignore_conflicts=True)

        # Init modules system
        init_modules_system(self.system.modules_system)

        # Setup the runner
        self.runner = executors.Runner(policies.SerialExecutionPolicy())
        self.checks = self.loader.load_all(system=self.system,
                                           resources=self.resources)
Example #11
def loader_with_path_tmpdir(tmp_path):
    test_dir_a = tmp_path / 'a'
    test_dir_b = tmp_path / 'b'
    os.mkdir(test_dir_a)
    os.mkdir(test_dir_b)
    test_a = 'unittests/resources/checks/emptycheck.py'
    test_b = 'unittests/resources/checks/hellocheck.py'
    shutil.copyfile(test_a, test_dir_a / 'test.py')
    shutil.copyfile(test_b, test_dir_b / 'test.py')
    return RegressionCheckLoader(
        [test_dir_a.as_posix(), test_dir_b.as_posix()])
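The function above is presumably registered as a pytest fixture (the decorator falls outside the excerpt). A hypothetical test consuming it could look like the sketch below; the test name is invented, and the only grounded assertion is that emptycheck.py contributes a check named 'EmptyTest' (see Example #6):

# Hypothetical usage of the loader_with_path_tmpdir fixture above.
def test_load_from_two_dirs(loader_with_path_tmpdir):
    checks = loader_with_path_tmpdir.load_all()
    assert 'EmptyTest' in {c.name for c in checks}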
Example #12
def test_ci_gitlab_pipeline():
    loader = RegressionCheckLoader(
        ['unittests/resources/checks_unlisted/deps_complex.py'])
    cases = dependencies.toposort(
        dependencies.build_deps(executors.generate_testcases(
            loader.load_all()))[0])
    with io.StringIO() as fp:
        ci.emit_pipeline(fp, cases)
        pipeline = fp.getvalue()

    # Fetch the latest Gitlab CI JSON schema
    try:
        response = requests.get(
            'https://gitlab.com/gitlab-org/gitlab/-/raw/master/app/assets/javascripts/editor/schema/ci.json'  # noqa: E501
        )
    except requests.exceptions.ConnectionError as e:
        pytest.skip(f'could not reach URL: {e}')
    else:
        assert response.ok

    schema = response.json()
    jsonschema.validate(yaml.safe_load(pipeline), schema)
Example #13
    def setUp(self):
        self.loader = RegressionCheckLoader(['.'], ignore_conflicts=True)
        self.loader_with_path = RegressionCheckLoader(
            ['unittests/resources/checks', 'unittests/foobar'],
            ignore_conflicts=True)
        self.loader_with_prefix = RegressionCheckLoader(
            load_path=['bad'],
            prefix=os.path.abspath('unittests/resources/checks'))
Example #14
    def setUp(self):
        self.loader = RegressionCheckLoader(['.'], ignore_conflicts=True)
        self.loader_with_path = RegressionCheckLoader(
            ['unittests/resources', 'unittests/foobar'], ignore_conflicts=True)
        self.loader_with_prefix = RegressionCheckLoader(
            load_path=['badchecks'],
            prefix=os.path.abspath('unittests/resources'))

        self.system = System('foo')
        self.resources = ResourcesManager()
Example #15
    def setUp(self):
        warnings.simplefilter('ignore', ReframeDeprecationWarning)
        self.loader = RegressionCheckLoader(['.'])
        self.loader_with_path = RegressionCheckLoader(
            ['unittests/resources', 'unittests/foobar'])
        self.loader_with_prefix = RegressionCheckLoader(
            load_path=['badchecks'],
            prefix=os.path.abspath('unittests/resources'))

        self.system = System('foo')
        self.resources = ResourcesManager()
Example #16
    def setUp(self):
        self.partition, self.prgenv = _setup_local_execution()
        self.loader = RegressionCheckLoader(['unittests/resources/checks'])

        # Set runtime prefix
        rt.runtime().resources.prefix = tempfile.mkdtemp(dir='unittests')
Example #17
class TestRegressionTest(unittest.TestCase):
    def setUp(self):
        self.partition, self.prgenv = _setup_local_execution()
        self.loader = RegressionCheckLoader(['unittests/resources/checks'])

        # Set runtime prefix
        rt.runtime().resources.prefix = tempfile.mkdtemp(dir='unittests')

    def tearDown(self):
        os_ext.rmtree(rt.runtime().resources.prefix)
        os_ext.rmtree('.rfm_testing', ignore_errors=True)

    def replace_prefix(self, filename, new_prefix):
        basename = os.path.basename(filename)
        return os.path.join(new_prefix, basename)

    def keep_files_list(self, test, compile_only=False):
        ret = [
            self.replace_prefix(sn.evaluate(test.stdout), test.outputdir),
            self.replace_prefix(sn.evaluate(test.stderr), test.outputdir)
        ]

        if not compile_only:
            ret.append(
                self.replace_prefix(test.job.script_filename, test.outputdir))

        ret.extend(
            [self.replace_prefix(f, test.outputdir) for f in test.keep_files])
        return ret

    def test_environ_setup(self):
        test = self.loader.load_from_file(
            'unittests/resources/checks/hellocheck.py')[0]

        # Use test environment for the regression check
        test.valid_prog_environs = [self.prgenv.name]
        test.modules = ['testmod_foo']
        test.variables = {'_FOO_': '1', '_BAR_': '2'}
        test.local = True

        test.setup(self.partition, self.prgenv)

        for k in test.variables.keys():
            assert k not in os.environ

    def _run_test(self, test, compile_only=False):
        _run(test, self.partition, self.prgenv)
        assert not os.path.exists(test.stagedir)
        for f in self.keep_files_list(test, compile_only):
            assert os.path.exists(f)

    @fixtures.switch_to_user_runtime
    def test_hellocheck(self):
        self.partition, self.prgenv = _setup_remote_execution()
        test = self.loader.load_from_file(
            'unittests/resources/checks/hellocheck.py')[0]

        # Use test environment for the regression check
        test.valid_prog_environs = [self.prgenv.name]
        self._run_test(test)

    @fixtures.switch_to_user_runtime
    def test_hellocheck_make(self):
        self.partition, self.prgenv = _setup_remote_execution()
        test = self.loader.load_from_file(
            'unittests/resources/checks/hellocheck_make.py')[0]

        # Use test environment for the regression check
        test.valid_prog_environs = [self.prgenv.name]
        self._run_test(test)

    def test_hellocheck_local(self):
        test = self.loader.load_from_file(
            'unittests/resources/checks/hellocheck.py')[0]

        # Use test environment for the regression check
        test.valid_prog_environs = [self.prgenv.name]

        # Test also the prebuild/postbuild functionality
        test.prebuild_cmd = ['touch prebuild', 'mkdir prebuild_dir']
        test.postbuild_cmd = ['touch postbuild', 'mkdir postbuild_dir']
        test.keep_files = [
            'prebuild', 'postbuild', 'prebuild_dir', 'postbuild_dir'
        ]

        # Force local execution of the test
        test.local = True
        self._run_test(test)

    def test_hellocheck_local_prepost_run(self):
        @sn.sanity_function
        def stagedir(test):
            return test.stagedir

        test = self.loader.load_from_file(
            'unittests/resources/checks/hellocheck.py')[0]

        # Use test environment for the regression check
        test.valid_prog_environs = [self.prgenv.name]

        # Test also the prebuild/postbuild functionality
        test.pre_run = ['echo prerun: `pwd`']
        test.post_run = ['echo postrun: `pwd`']
        pre_run_path = sn.extractsingle(r'^prerun: (\S+)', test.stdout, 1)
        post_run_path = sn.extractsingle(r'^postrun: (\S+)', test.stdout, 1)
        test.sanity_patterns = sn.all([
            sn.assert_eq(stagedir(test), pre_run_path),
            sn.assert_eq(stagedir(test), post_run_path),
        ])

        # Force local execution of the test
        test.local = True
        self._run_test(test)

    def test_hellocheck_local_prepost_run_in_setup(self):
        def custom_setup(obj, partition, environ, **job_opts):
            super(obj.__class__, obj).setup(partition, environ, **job_opts)
            obj.pre_run = ['echo Prerunning cmd from setup phase']
            obj.post_run = ['echo Postrunning cmd from setup phase']

        test = self.loader.load_from_file(
            'unittests/resources/checks/hellocheck.py')[0]

        # Monkey patch the setup method of the test
        test.setup = custom_setup.__get__(test)

        # Use test environment for the regression check
        test.valid_prog_environs = ['*']

        test.sanity_patterns = sn.all([
            sn.assert_found(r'^Prerunning cmd from setup phase', test.stdout),
            sn.assert_found(r'Hello, World\!', test.stdout),
            sn.assert_found(r'^Postrunning cmd from setup phase', test.stdout)
        ])

        # Force local execution of the test
        test.local = True
        self._run_test(test)

    def test_run_only_sanity(self):
        @fixtures.custom_prefix('unittests/resources/checks')
        class MyTest(rfm.RunOnlyRegressionTest):
            def __init__(self):
                self.executable = './hello.sh'
                self.executable_opts = ['Hello, World!']
                self.local = True
                self.valid_prog_environs = ['*']
                self.valid_systems = ['*']
                self.sanity_patterns = sn.assert_found(r'Hello, World\!',
                                                       self.stdout)

        self._run_test(MyTest())

    def test_run_only_no_srcdir(self):
        @fixtures.custom_prefix('foo/bar/')
        class MyTest(rfm.RunOnlyRegressionTest):
            def __init__(self):
                self.executable = 'echo'
                self.executable_opts = ['hello']
                self.valid_prog_environs = ['*']
                self.valid_systems = ['*']
                self.sanity_patterns = sn.assert_found(r'hello', self.stdout)

        test = MyTest()
        assert test.sourcesdir is None
        self._run_test(MyTest())

    def test_compile_only_failure(self):
        @fixtures.custom_prefix('unittests/resources/checks')
        class MyTest(rfm.CompileOnlyRegressionTest):
            def __init__(self):
                self.sourcepath = 'compiler_failure.c'
                self.valid_prog_environs = ['*']
                self.valid_systems = ['*']

        test = MyTest()
        test.setup(self.partition, self.prgenv)
        test.compile()
        with pytest.raises(BuildError):
            test.compile_wait()

    def test_compile_only_warning(self):
        @fixtures.custom_prefix('unittests/resources/checks')
        class MyTest(rfm.RunOnlyRegressionTest):
            def __init__(self):
                self.build_system = 'SingleSource'
                self.build_system.srcfile = 'compiler_warning.c'
                self.build_system.cflags = ['-Wall']
                self.valid_prog_environs = ['*']
                self.valid_systems = ['*']
                self.sanity_patterns = sn.assert_found(r'warning', self.stderr)

        self._run_test(MyTest(), compile_only=True)

    @rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'testsys')
    def test_supports_system(self):
        test = self.loader.load_from_file(
            'unittests/resources/checks/hellocheck.py')[0]

        test.valid_systems = ['*']
        assert test.supports_system('gpu')
        assert test.supports_system('login')
        assert test.supports_system('testsys:gpu')
        assert test.supports_system('testsys:login')

        test.valid_systems = ['testsys']
        assert test.supports_system('gpu')
        assert test.supports_system('login')
        assert test.supports_system('testsys:gpu')
        assert test.supports_system('testsys:login')

        test.valid_systems = ['testsys:gpu']
        assert test.supports_system('gpu')
        assert not test.supports_system('login')
        assert test.supports_system('testsys:gpu')
        assert not test.supports_system('testsys:login')

        test.valid_systems = ['testsys:login']
        assert not test.supports_system('gpu')
        assert test.supports_system('login')
        assert not test.supports_system('testsys:gpu')
        assert test.supports_system('testsys:login')

        test.valid_systems = ['foo']
        assert not test.supports_system('gpu')
        assert not test.supports_system('login')
        assert not test.supports_system('testsys:gpu')
        assert not test.supports_system('testsys:login')

    def test_supports_environ(self):
        test = self.loader.load_from_file(
            'unittests/resources/checks/hellocheck.py')[0]

        test.valid_prog_environs = ['*']
        assert test.supports_environ('foo1')
        assert test.supports_environ('foo-env')
        assert test.supports_environ('*')

    def test_sourcesdir_none(self):
        @fixtures.custom_prefix('unittests/resources/checks')
        class MyTest(rfm.RegressionTest):
            def __init__(self):
                self.sourcesdir = None
                self.valid_prog_environs = ['*']
                self.valid_systems = ['*']

        with pytest.raises(ReframeError):
            self._run_test(MyTest())

    def test_sourcesdir_build_system(self):
        @fixtures.custom_prefix('unittests/resources/checks')
        class MyTest(rfm.RegressionTest):
            def __init__(self):
                self.build_system = 'Make'
                self.sourcepath = 'code'
                self.executable = './code/hello'
                self.local = True
                self.valid_systems = ['*']
                self.valid_prog_environs = ['*']
                self.sanity_patterns = sn.assert_found(r'Hello, World\!',
                                                       self.stdout)

        self._run_test(MyTest())

    def test_sourcesdir_none_generated_sources(self):
        @fixtures.custom_prefix('unittests/resources/checks')
        class MyTest(rfm.RegressionTest):
            def __init__(self):
                self.sourcesdir = None
                self.prebuild_cmd = [
                    "printf '#include <stdio.h>\\n int main(){ "
                    "printf(\"Hello, World!\\\\n\"); return 0; }' > hello.c"
                ]
                self.executable = './hello'
                self.sourcepath = 'hello.c'
                self.local = True
                self.valid_systems = ['*']
                self.valid_prog_environs = ['*']
                self.sanity_patterns = sn.assert_found(r'Hello, World\!',
                                                       self.stdout)

        self._run_test(MyTest())

    def test_sourcesdir_none_compile_only(self):
        @fixtures.custom_prefix('unittests/resources/checks')
        class MyTest(rfm.CompileOnlyRegressionTest):
            def __init__(self):
                self.sourcesdir = None
                self.valid_prog_environs = ['*']
                self.valid_systems = ['*']

        with pytest.raises(BuildError):
            self._run_test(MyTest())

    def test_sourcesdir_none_run_only(self):
        @fixtures.custom_prefix('unittests/resources/checks')
        class MyTest(rfm.RunOnlyRegressionTest):
            def __init__(self):
                self.sourcesdir = None
                self.executable = 'echo'
                self.executable_opts = ["Hello, World!"]
                self.local = True
                self.valid_prog_environs = ['*']
                self.valid_systems = ['*']
                self.sanity_patterns = sn.assert_found(r'Hello, World\!',
                                                       self.stdout)

        self._run_test(MyTest())

    def test_sourcepath_abs(self):
        @fixtures.custom_prefix('unittests/resources/checks')
        class MyTest(rfm.CompileOnlyRegressionTest):
            def __init__(self):
                self.valid_prog_environs = ['*']
                self.valid_systems = ['*']

        test = MyTest()
        test.setup(self.partition, self.prgenv)
        test.sourcepath = '/usr/src'
        with pytest.raises(PipelineError):
            test.compile()

    def test_sourcepath_upref(self):
        @fixtures.custom_prefix('unittests/resources/checks')
        class MyTest(rfm.CompileOnlyRegressionTest):
            def __init__(self):
                self.valid_prog_environs = ['*']
                self.valid_systems = ['*']

        test = MyTest()
        test.setup(self.partition, self.prgenv)
        test.sourcepath = '../hellosrc'
        with pytest.raises(PipelineError):
            test.compile()

    @rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'testsys')
    def test_extra_resources(self):
        @fixtures.custom_prefix('unittests/resources/checks')
        class MyTest(HelloTest):
            def __init__(self):
                super().__init__()
                self.name = type(self).__name__
                self.executable = os.path.join('.', self.name)
                self.local = True

            @rfm.run_after('setup')
            def set_resources(self):
                self.extra_resources = {
                    'gpu': {
                        'num_gpus_per_node': 2
                    },
                    'datawarp': {
                        'capacity': '100GB',
                        'stagein_src': self.stagedir
                    }
                }
                self.job.options += ['--foo']

        test = MyTest()
        partition = rt.runtime().system.partition('gpu')
        environ = partition.environment('builtin-gcc')
        _run(test, partition, environ)
        expected_job_options = [
            '--gres=gpu:2', '#DW jobdw capacity=100GB',
            '#DW stage_in source=%s' % test.stagedir, '--foo'
        ]
        self.assertCountEqual(expected_job_options, test.job.options)
Example #18
class TestSerialExecutionPolicy(unittest.TestCase):
    def setUp(self):
        self.loader = RegressionCheckLoader(['unittests/resources/checks'],
                                            ignore_conflicts=True)

        # Setup the runner
        self.runner = executors.Runner(policies.SerialExecutionPolicy())
        self.checks = self.loader.load_all()

        # Set runtime prefix
        rt.runtime().resources.prefix = tempfile.mkdtemp(dir='unittests')

        # Reset current_run
        rt.runtime()._current_run = 0

    def tearDown(self):
        os_ext.rmtree(rt.runtime().resources.prefix)

    def _num_failures_stage(self, stage):
        stats = self.runner.stats
        return len(
            [t for t in stats.tasks_failed() if t.failed_stage == stage])

    def assert_all_dead(self):
        stats = self.runner.stats
        for t in self.runner.stats.get_tasks():
            try:
                finished = t.check.poll()
            except JobNotStartedError:
                finished = True

            self.assertTrue(finished)

    def test_runall(self):
        self.runner.runall(self.checks)

        stats = self.runner.stats
        self.assertEqual(7, stats.num_cases())
        self.assertEqual(4, stats.num_failures())
        self.assertEqual(2, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(1, self._num_failures_stage('performance'))

    def test_runall_skip_system_check(self):
        self.runner.policy.skip_system_check = True
        self.runner.runall(self.checks)

        stats = self.runner.stats
        self.assertEqual(8, stats.num_cases())
        self.assertEqual(4, stats.num_failures())
        self.assertEqual(2, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(1, self._num_failures_stage('performance'))

    def test_runall_skip_prgenv_check(self):
        self.runner.policy.skip_environ_check = True
        self.runner.runall(self.checks)

        stats = self.runner.stats
        self.assertEqual(8, stats.num_cases())
        self.assertEqual(4, stats.num_failures())
        self.assertEqual(2, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(1, self._num_failures_stage('performance'))

    def test_runall_skip_sanity_check(self):
        self.runner.policy.skip_sanity_check = True
        self.runner.runall(self.checks)

        stats = self.runner.stats
        self.assertEqual(7, stats.num_cases())
        self.assertEqual(3, stats.num_failures())
        self.assertEqual(2, self._num_failures_stage('setup'))
        self.assertEqual(0, self._num_failures_stage('sanity'))
        self.assertEqual(1, self._num_failures_stage('performance'))

    def test_runall_skip_performance_check(self):
        self.runner.policy.skip_performance_check = True
        self.runner.runall(self.checks)

        stats = self.runner.stats
        self.assertEqual(7, stats.num_cases())
        self.assertEqual(3, stats.num_failures())
        self.assertEqual(2, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(0, self._num_failures_stage('performance'))

    def test_strict_performance_check(self):
        self.runner.policy.strict_check = True
        self.runner.runall(self.checks)

        stats = self.runner.stats
        self.assertEqual(7, stats.num_cases())
        self.assertEqual(5, stats.num_failures())
        self.assertEqual(2, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(2, self._num_failures_stage('performance'))

    def test_force_local_execution(self):
        self.runner.policy.force_local = True
        self.runner.runall([HelloTest()])
        stats = self.runner.stats
        for t in stats.get_tasks():
            self.assertTrue(t.check.local)

    def test_kbd_interrupt_within_test(self):
        check = KeyboardInterruptCheck()
        self.assertRaises(KeyboardInterrupt, self.runner.runall, [check])
        stats = self.runner.stats
        self.assertEqual(1, stats.num_failures())
        self.assert_all_dead()

    def test_system_exit_within_test(self):
        check = SystemExitCheck()

        # This should not raise and should not exit
        self.runner.runall([check])
        stats = self.runner.stats
        self.assertEqual(1, stats.num_failures())

    def test_retries_bad_check(self):
        max_retries = 2
        checks = [BadSetupCheck(), BadSetupCheckEarly()]
        self.runner._max_retries = max_retries
        self.runner.runall(checks)

        # Ensure that the test was retried #max_retries times and failed.
        self.assertEqual(2, self.runner.stats.num_cases())
        self.assertEqual(max_retries, rt.runtime().current_run)
        self.assertEqual(2, self.runner.stats.num_failures())

        # Ensure that the report does not raise any exception.
        self.runner.stats.retry_report()

    def test_retries_good_check(self):
        max_retries = 2
        checks = [HelloTest()]
        self.runner._max_retries = max_retries
        self.runner.runall(checks)

        # Ensure that the test passed without retries.
        self.assertEqual(1, self.runner.stats.num_cases())
        self.assertEqual(0, rt.runtime().current_run)
        self.assertEqual(0, self.runner.stats.num_failures())

    def test_pass_in_retries(self):
        max_retries = 3
        run_to_pass = 2
        # Create a file containing the current_run; Run 0 will set it to 0,
        # run 1 to 1 and so on.
        with tempfile.NamedTemporaryFile(mode='wt', delete=False) as fp:
            fp.write('0\n')

        checks = [RetriesCheck(run_to_pass, fp.name)]
        self.runner._max_retries = max_retries
        self.runner.runall(checks)

        # Ensure that the test passed after retries in run #run_to_pass.
        self.assertEqual(1, self.runner.stats.num_cases())
        self.assertEqual(1, self.runner.stats.num_failures(run=0))
        self.assertEqual(run_to_pass, rt.runtime().current_run)
        self.assertEqual(0, self.runner.stats.num_failures())
        os.remove(fp.name)
Example #19
class TestDependencies(unittest.TestCase):
    class Node:
        '''A node in the test case graph.

        It's simply a wrapper around a (test_name, partition, environment)
        tuple that can interact seamlessly with a real test case.
        It's meant for convenience in unit testing.
        '''
        def __init__(self, cname, pname, ename):
            self.cname, self.pname, self.ename = cname, pname, ename

        def __eq__(self, other):
            if isinstance(other, type(self)):
                return (self.cname == other.cname and self.pname == other.pname
                        and self.ename == other.ename)

            if isinstance(other, executors.TestCase):
                return (self.cname == other.check.name
                        and self.pname == other.partition.fullname
                        and self.ename == other.environ.name)

            return NotImplemented

        def __hash__(self):
            return hash(self.cname) ^ hash(self.pname) ^ hash(self.ename)

        def __repr__(self):
            return 'Node(%r, %r, %r)' % (self.cname, self.pname, self.ename)

    def has_edge(graph, src, dst):
        return dst in graph[src]

    def num_deps(graph, cname):
        return sum(
            len(deps) for c, deps in graph.items() if c.check.name == cname)

    def in_degree(graph, node):
        for v in graph.keys():
            if v == node:
                return v.num_dependents

    def find_check(name, checks):
        for c in checks:
            if c.name == name:
                return c

        return None

    def find_case(cname, ename, cases):
        for c in cases:
            if c.check.name == cname and c.environ.name == ename:
                return c

    def setUp(self):
        self.loader = RegressionCheckLoader(
            ['unittests/resources/checks_unlisted/deps_simple.py'])

        # Set runtime prefix
        rt.runtime().resources.prefix = tempfile.mkdtemp(dir='unittests')

    def tearDown(self):
        os_ext.rmtree(rt.runtime().resources.prefix)

    @rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'sys0')
    def test_eq_hash(self):
        find_case = TestDependencies.find_case
        cases = executors.generate_testcases(self.loader.load_all())

        case0 = find_case('Test0', 'e0', cases)
        case1 = find_case('Test0', 'e1', cases)
        case0_copy = case0.clone()

        assert case0 == case0_copy
        assert hash(case0) == hash(case0_copy)
        assert case1 != case0
        assert hash(case1) != hash(case0)

    @rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'sys0')
    def test_build_deps(self):
        Node = TestDependencies.Node
        has_edge = TestDependencies.has_edge
        num_deps = TestDependencies.num_deps
        in_degree = TestDependencies.in_degree
        find_check = TestDependencies.find_check
        find_case = TestDependencies.find_case

        checks = self.loader.load_all()
        cases = executors.generate_testcases(checks)

        # Test calling getdep() before having built the graph
        t = find_check('Test1_exact', checks)
        with pytest.raises(DependencyError):
            t.getdep('Test0', 'e0')

        # Build dependencies and continue testing
        deps = dependency.build_deps(cases)
        dependency.validate_deps(deps)

        # Check DEPEND_FULLY dependencies
        assert num_deps(deps, 'Test1_fully') == 8
        for p in ['sys0:p0', 'sys0:p1']:
            for e0 in ['e0', 'e1']:
                for e1 in ['e0', 'e1']:
                    assert has_edge(deps, Node('Test1_fully', p, e0),
                                    Node('Test0', p, e1))

        # Check DEPEND_BY_ENV
        assert num_deps(deps, 'Test1_by_env') == 4
        assert num_deps(deps, 'Test1_default') == 4
        for p in ['sys0:p0', 'sys0:p1']:
            for e in ['e0', 'e1']:
                assert has_edge(deps, Node('Test1_by_env', p, e),
                                Node('Test0', p, e))
                assert has_edge(deps, Node('Test1_default', p, e),
                                Node('Test0', p, e))

        # Check DEPEND_EXACT
        assert num_deps(deps, 'Test1_exact') == 6
        for p in ['sys0:p0', 'sys0:p1']:
            assert has_edge(deps, Node('Test1_exact', p, 'e0'),
                            Node('Test0', p, 'e0'))
            assert has_edge(deps, Node('Test1_exact', p, 'e0'),
                            Node('Test0', p, 'e1'))
            assert has_edge(deps, Node('Test1_exact', p, 'e1'),
                            Node('Test0', p, 'e1'))

        # Check in-degree of Test0

        # 2 from Test1_fully,
        # 1 from Test1_by_env,
        # 1 from Test1_exact,
        # 1 from Test1_default
        assert in_degree(deps, Node('Test0', 'sys0:p0', 'e0')) == 5
        assert in_degree(deps, Node('Test0', 'sys0:p1', 'e0')) == 5

        # 2 from Test1_fully,
        # 1 from Test1_by_env,
        # 2 from Test1_exact,
        # 1 from Test1_default
        assert in_degree(deps, Node('Test0', 'sys0:p0', 'e1')) == 6
        assert in_degree(deps, Node('Test0', 'sys0:p1', 'e1')) == 6

        # Pick a check to test getdep()
        check_e0 = find_case('Test1_exact', 'e0', cases).check
        check_e1 = find_case('Test1_exact', 'e1', cases).check

        with pytest.raises(DependencyError):
            check_e0.getdep('Test0')

        # Set the current environment
        check_e0._current_environ = Environment('e0')
        check_e1._current_environ = Environment('e1')

        assert check_e0.getdep('Test0', 'e0').name == 'Test0'
        assert check_e0.getdep('Test0', 'e1').name == 'Test0'
        assert check_e1.getdep('Test0', 'e1').name == 'Test0'
        with pytest.raises(DependencyError):
            check_e0.getdep('TestX', 'e0')

        with pytest.raises(DependencyError):
            check_e0.getdep('Test0', 'eX')

        with pytest.raises(DependencyError):
            check_e1.getdep('Test0', 'e0')

    @rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'sys0')
    def test_build_deps_unknown_test(self):
        find_check = TestDependencies.find_check
        checks = self.loader.load_all()

        # Add dependencies on a nonexistent test
        test0 = find_check('Test0', checks)
        for depkind in ('default', 'fully', 'by_env', 'exact'):
            test1 = find_check('Test1_' + depkind, checks)
            if depkind == 'default':
                test1.depends_on('TestX')
            elif depkind == 'exact':
                test1.depends_on('TestX', rfm.DEPEND_EXACT, {'e0': ['e0']})
            elif depkind == 'fully':
                test1.depends_on('TestX', rfm.DEPEND_FULLY)
            elif depkind == 'by_env':
                test1.depends_on('TestX', rfm.DEPEND_BY_ENV)

            with pytest.raises(DependencyError):
                dependency.build_deps(executors.generate_testcases(checks))

    @rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'sys0')
    def test_build_deps_unknown_target_env(self):
        find_check = TestDependencies.find_check
        checks = self.loader.load_all()

        # Add a dependency on a nonexistent target environment
        test0 = find_check('Test0', checks)
        test1 = find_check('Test1_default', checks)
        test1.depends_on('Test0', rfm.DEPEND_EXACT, {'e0': ['eX']})
        with pytest.raises(DependencyError):
            dependency.build_deps(executors.generate_testcases(checks))

    @rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'sys0')
    def test_build_deps_unknown_source_env(self):
        find_check = TestDependencies.find_check
        num_deps = TestDependencies.num_deps
        checks = self.loader.load_all()

        # Add a dependency from a nonexistent source environment
        test0 = find_check('Test0', checks)
        test1 = find_check('Test1_default', checks)
        test1.depends_on('Test0', rfm.DEPEND_EXACT, {'eX': ['e0']})

        # Unknown source is ignored, because it might simply be that the test
        # is not executed for eX
        deps = dependency.build_deps(executors.generate_testcases(checks))
        assert num_deps(deps, 'Test1_default') == 4

    @rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'sys0')
    def test_build_deps_empty(self):
        assert {} == dependency.build_deps([])

    def create_test(self, name):
        test = rfm.RegressionTest()
        test.name = name
        test.valid_systems = ['*']
        test.valid_prog_environs = ['*']
        test.executable = 'echo'
        test.executable_opts = [name]
        return test

    @rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'sys0')
    def test_valid_deps(self):
        #
        #       t0       +-->t5<--+
        #       ^        |        |
        #       |        |        |
        #   +-->t1<--+   t6       t7
        #   |        |            ^
        #   t2<------t3           |
        #   ^        ^            |
        #   |        |            t8
        #   +---t4---+
        #
        t0 = self.create_test('t0')
        t1 = self.create_test('t1')
        t2 = self.create_test('t2')
        t3 = self.create_test('t3')
        t4 = self.create_test('t4')
        t5 = self.create_test('t5')
        t6 = self.create_test('t6')
        t7 = self.create_test('t7')
        t8 = self.create_test('t8')
        t1.depends_on('t0')
        t2.depends_on('t1')
        t3.depends_on('t1')
        t3.depends_on('t2')
        t4.depends_on('t2')
        t4.depends_on('t3')
        t6.depends_on('t5')
        t7.depends_on('t5')
        t8.depends_on('t7')
        dependency.validate_deps(
            dependency.build_deps(
                executors.generate_testcases(
                    [t0, t1, t2, t3, t4, t5, t6, t7, t8])))

    @rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'sys0')
    def test_cyclic_deps(self):
        #
        #       t0       +-->t5<--+
        #       ^        |        |
        #       |        |        |
        #   +-->t1<--+   t6       t7
        #   |   |    |            ^
        #   t2  |    t3           |
        #   ^   |    ^            |
        #   |   v    |            t8
        #   +---t4---+
        #
        t0 = self.create_test('t0')
        t1 = self.create_test('t1')
        t2 = self.create_test('t2')
        t3 = self.create_test('t3')
        t4 = self.create_test('t4')
        t5 = self.create_test('t5')
        t6 = self.create_test('t6')
        t7 = self.create_test('t7')
        t8 = self.create_test('t8')
        t1.depends_on('t0')
        t1.depends_on('t4')
        t2.depends_on('t1')
        t3.depends_on('t1')
        t4.depends_on('t2')
        t4.depends_on('t3')
        t6.depends_on('t5')
        t7.depends_on('t5')
        t8.depends_on('t7')
        deps = dependency.build_deps(
            executors.generate_testcases([t0, t1, t2, t3, t4, t5, t6, t7, t8]))

        with pytest.raises(DependencyError) as exc_info:
            dependency.validate_deps(deps)

        assert ('t4->t2->t1->t4' in str(exc_info.value)
                or 't2->t1->t4->t2' in str(exc_info.value)
                or 't1->t4->t2->t1' in str(exc_info.value)
                or 't1->t4->t3->t1' in str(exc_info.value)
                or 't4->t3->t1->t4' in str(exc_info.value)
                or 't3->t1->t4->t3' in str(exc_info.value))

    @rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'sys0')
    def test_cyclic_deps_by_env(self):
        t0 = self.create_test('t0')
        t1 = self.create_test('t1')
        t1.depends_on('t0', rfm.DEPEND_EXACT, {'e0': ['e0']})
        t0.depends_on('t1', rfm.DEPEND_EXACT, {'e1': ['e1']})
        deps = dependency.build_deps(executors.generate_testcases([t0, t1]))
        with pytest.raises(DependencyError) as exc_info:
            dependency.validate_deps(deps)

        assert ('t1->t0->t1' in str(exc_info.value)
                or 't0->t1->t0' in str(exc_info.value))

    @rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'sys0')
    def test_validate_deps_empty(self):
        dependency.validate_deps({})

    def assert_topological_order(self, cases, graph):
        cases_order = []
        visited_tests = set()
        tests = util.OrderedSet()
        for c in cases:
            check, part, env = c
            cases_order.append((check.name, part.fullname, env.name))
            tests.add(check.name)
            visited_tests.add(check.name)

            # Assert that all dependencies of c have been visited before
            for d in graph[c]:
                if d not in cases:
                    # dependency points outside the subgraph
                    continue

                assert d.check.name in visited_tests

        # Check the order of systems and prog. environments
        # We are checking against all possible orderings
        valid_orderings = []
        for partitions in itertools.permutations(['sys0:p0', 'sys0:p1']):
            for environs in itertools.permutations(['e0', 'e1']):
                ordering = []
                for t in tests:
                    for p in partitions:
                        for e in environs:
                            ordering.append((t, p, e))

                valid_orderings.append(ordering)

        assert cases_order in valid_orderings

    @rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'sys0')
    def test_toposort(self):
        #
        #       t0       +-->t5<--+
        #       ^        |        |
        #       |        |        |
        #   +-->t1<--+   t6       t7
        #   |        |            ^
        #   t2<------t3           |
        #   ^        ^            |
        #   |        |            t8
        #   +---t4---+
        #
        t0 = self.create_test('t0')
        t1 = self.create_test('t1')
        t2 = self.create_test('t2')
        t3 = self.create_test('t3')
        t4 = self.create_test('t4')
        t5 = self.create_test('t5')
        t6 = self.create_test('t6')
        t7 = self.create_test('t7')
        t8 = self.create_test('t8')
        t1.depends_on('t0')
        t2.depends_on('t1')
        t3.depends_on('t1')
        t3.depends_on('t2')
        t4.depends_on('t2')
        t4.depends_on('t3')
        t6.depends_on('t5')
        t7.depends_on('t5')
        t8.depends_on('t7')
        deps = dependency.build_deps(
            executors.generate_testcases([t0, t1, t2, t3, t4, t5, t6, t7, t8]))
        cases = dependency.toposort(deps)
        self.assert_topological_order(cases, deps)

    @rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'sys0')
    def test_toposort_subgraph(self):
        #
        #       t0
        #       ^
        #       |
        #   +-->t1<--+
        #   |        |
        #   t2<------t3
        #   ^        ^
        #   |        |
        #   +---t4---+
        #
        t0 = self.create_test('t0')
        t1 = self.create_test('t1')
        t2 = self.create_test('t2')
        t3 = self.create_test('t3')
        t4 = self.create_test('t4')
        t1.depends_on('t0')
        t2.depends_on('t1')
        t3.depends_on('t1')
        t3.depends_on('t2')
        t4.depends_on('t2')
        t4.depends_on('t3')
        full_deps = dependency.build_deps(
            executors.generate_testcases([t0, t1, t2, t3, t4]))
        partial_deps = dependency.build_deps(
            executors.generate_testcases([t3, t4]), full_deps)
        cases = dependency.toposort(partial_deps, is_subgraph=True)
        self.assert_topological_order(cases, partial_deps)
Example #20
class TestSerialExecutionPolicy(unittest.TestCase):
    def setUp(self):
        # Load a system configuration
        settings = config.load_from_file("reframe/settings.py")
        self.site_config = config.SiteConfiguration()
        self.site_config.load_from_dict(settings.site_configuration)
        self.system = self.site_config.systems['generic']
        self.resourcesdir = tempfile.mkdtemp(dir='unittests')
        self.resources = ResourcesManager(prefix=self.resourcesdir)
        self.loader = RegressionCheckLoader(['unittests/resources'],
                                            ignore_conflicts=True)

        # Init modules system
        init_modules_system(self.system.modules_system)

        # Setup the runner
        self.runner = executors.Runner(policies.SerialExecutionPolicy())
        self.checks = self.loader.load_all(system=self.system,
                                           resources=self.resources)

    def tearDown(self):
        shutil.rmtree(self.resourcesdir, ignore_errors=True)

    def _num_failures_stage(self, stage):
        stats = self.runner.stats
        return len(
            [t for t in stats.tasks_failed() if t.failed_stage == stage])

    def assert_all_dead(self):
        stats = self.runner.stats
        for t in self.runner.stats.get_tasks():
            try:
                finished = t.check.poll()
            except JobNotStartedError:
                finished = True

            self.assertTrue(finished)

    def test_runall(self):
        self.runner.runall(self.checks, self.system)

        stats = self.runner.stats
        self.assertEqual(8, stats.num_cases())
        self.assertEqual(5, stats.num_failures())
        self.assertEqual(3, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(1, self._num_failures_stage('performance'))

    def test_runall_skip_system_check(self):
        self.runner.policy.skip_system_check = True
        self.runner.runall(self.checks, self.system)

        stats = self.runner.stats
        self.assertEqual(9, stats.num_cases())
        self.assertEqual(5, stats.num_failures())
        self.assertEqual(3, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(1, self._num_failures_stage('performance'))

    def test_runall_skip_prgenv_check(self):
        self.runner.policy.skip_environ_check = True
        self.runner.runall(self.checks, self.system)

        stats = self.runner.stats
        self.assertEqual(9, stats.num_cases())
        self.assertEqual(5, stats.num_failures())
        self.assertEqual(3, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(1, self._num_failures_stage('performance'))

    def test_runall_skip_sanity_check(self):
        self.runner.policy.skip_sanity_check = True
        self.runner.runall(self.checks, self.system)

        stats = self.runner.stats
        self.assertEqual(8, stats.num_cases())
        self.assertEqual(4, stats.num_failures())
        self.assertEqual(3, self._num_failures_stage('setup'))
        self.assertEqual(0, self._num_failures_stage('sanity'))
        self.assertEqual(1, self._num_failures_stage('performance'))

    def test_runall_skip_performance_check(self):
        self.runner.policy.skip_performance_check = True
        self.runner.runall(self.checks, self.system)

        stats = self.runner.stats
        self.assertEqual(8, stats.num_cases())
        self.assertEqual(4, stats.num_failures())
        self.assertEqual(3, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(0, self._num_failures_stage('performance'))

    def test_strict_performance_check(self):
        self.runner.policy.strict_check = True
        self.runner.runall(self.checks, self.system)

        stats = self.runner.stats
        self.assertEqual(8, stats.num_cases())
        self.assertEqual(6, stats.num_failures())
        self.assertEqual(3, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(2, self._num_failures_stage('performance'))

    def test_force_local_execution(self):
        self.runner.policy.force_local = True
        self.runner.runall(
            [HelloTest(system=self.system, resources=self.resources)],
            self.system)
        stats = self.runner.stats
        for t in stats.get_tasks():
            self.assertTrue(t.check.local)

    def test_kbd_interrupt_within_test(self):
        check = KeyboardInterruptCheck(system=self.system,
                                       resources=self.resources)
        self.assertRaises(KeyboardInterrupt, self.runner.runall, [check],
                          self.system)
        stats = self.runner.stats
        self.assertEqual(1, stats.num_failures())
        self.assert_all_dead()

    def test_system_exit_within_test(self):
        check = SystemExitCheck(system=self.system, resources=self.resources)

        # This should not raise and should not exit
        self.runner.runall([check], self.system)
        stats = self.runner.stats
        self.assertEqual(1, stats.num_failures())

    def test_retries_bad_check(self):
        max_retries = 2
        checks = [BadSetupCheck(system=self.system, resources=self.resources)]
        self.runner._max_retries = max_retries
        self.runner.runall(checks, self.system)

        # Ensure that the test was retried #max_retries times and failed.
        self.assertEqual(1, self.runner.stats.num_cases())
        self.assertEqual(max_retries, self.runner.stats.current_run)
        self.assertEqual(1, self.runner.stats.num_failures())

    def test_retries_good_check(self):
        max_retries = 2
        checks = [HelloTest(system=self.system, resources=self.resources)]
        self.runner._max_retries = max_retries
        self.runner.runall(checks, self.system)

        # Ensure that the test passed without retries.
        self.assertEqual(1, self.runner.stats.num_cases())
        self.assertEqual(0, self.runner.stats.current_run)
        self.assertEqual(0, self.runner.stats.num_failures())

    def test_pass_in_retries(self):
        max_retries = 3
        run_to_pass = 2
        # Create a file containing the current_run; Run 0 will set it to 0,
        # run 1 to 1 and so on.
        with tempfile.NamedTemporaryFile(mode='wt', delete=False) as fp:
            fp.write('0\n')

        checks = [
            RetriesCheck(run_to_pass,
                         fp.name,
                         system=self.system,
                         resources=self.resources)
        ]
        self.runner._max_retries = max_retries
        self.runner.runall(checks, self.system)

        # Ensure that the test passed after retries in run #run_to_pass.
        self.assertEqual(1, self.runner.stats.num_cases())
        self.assertEqual(1, self.runner.stats.num_failures(run=0))
        self.assertEqual(run_to_pass, self.runner.stats.current_run)
        self.assertEqual(0, self.runner.stats.num_failures())
        os.remove(fp.name)
Exemplo n.º 21
0
def _make_loader(check_search_path):
    return RegressionCheckLoader(check_search_path, ignore_conflicts=True)
Exemplo n.º 22
0
def main():
    # Setup command line options
    argparser = argparse.ArgumentParser()
    output_options = argparser.add_argument_group('Options controlling output')
    locate_options = argparser.add_argument_group(
        'Options for locating checks')
    select_options = argparser.add_argument_group(
        'Options for selecting checks')
    action_options = argparser.add_argument_group(
        'Options controlling actions')
    run_options = argparser.add_argument_group(
        'Options controlling execution of checks')
    env_options = argparser.add_argument_group(
        'Options controlling environment')
    misc_options = argparser.add_argument_group('Miscellaneous options')

    # Output directory options
    output_options.add_argument('--prefix',
                                action='store',
                                metavar='DIR',
                                help='Set output directory prefix to DIR',
                                envvar='RFM_PREFIX',
                                configvar='systems/prefix')
    output_options.add_argument('-o',
                                '--output',
                                action='store',
                                metavar='DIR',
                                help='Set output directory to DIR',
                                envvar='RFM_OUTPUT_DIR',
                                configvar='systems/outputdir')
    output_options.add_argument('-s',
                                '--stage',
                                action='store',
                                metavar='DIR',
                                help='Set stage directory to DIR',
                                envvar='RFM_STAGE_DIR',
                                configvar='systems/stagedir')
    output_options.add_argument(
        '--perflogdir',
        action='store',
        metavar='DIR',
        help=('Set directory prefix for the performance logs '
              '(default: ${prefix}/perflogs, '
              'relevant only if the filelog backend is used)'),
        envvar='RFM_PERFLOG_DIR',
        configvar='logging/handlers_perflog/filelog_basedir')
    output_options.add_argument(
        '--keep-stage-files',
        action='store_true',
        help='Keep stage directory even if check is successful',
        envvar='RFM_KEEP_STAGE_FILES',
        configvar='general/keep_stage_files')
    output_options.add_argument(
        '--save-log-files',
        action='store_true',
        default=False,
        help=('Copy the log file from the current directory to the '
              'output directory when ReFrame ends'),
        envvar='RFM_SAVE_LOG_FILES',
        configvar='general/save_log_files')

    # Check discovery options
    locate_options.add_argument(
        '-c',
        '--checkpath',
        action='append',
        metavar='DIR|FILE',
        help="Add DIR or FILE to the check search path",
        envvar='RFM_CHECK_SEARCH_PATH :',
        configvar='general/check_search_path')
    locate_options.add_argument('-R',
                                '--recursive',
                                action='store_true',
                                help='Load checks recursively',
                                envvar='RFM_CHECK_SEARCH_RECURSIVE',
                                configvar='general/check_search_recursive')
    locate_options.add_argument('--ignore-check-conflicts',
                                action='store_true',
                                help='Skip checks with conflicting names',
                                envvar='RFM_IGNORE_CHECK_CONFLICTS',
                                configvar='general/ignore_check_conflicts')

    # Select options
    select_options.add_argument('-t',
                                '--tag',
                                action='append',
                                dest='tags',
                                default=[],
                                help='Select checks matching TAG')
    select_options.add_argument('-n',
                                '--name',
                                action='append',
                                dest='names',
                                default=[],
                                metavar='NAME',
                                help='Select checks with NAME')
    select_options.add_argument('-x',
                                '--exclude',
                                action='append',
                                dest='exclude_names',
                                metavar='NAME',
                                default=[],
                                help='Exclude checks with NAME')
    select_options.add_argument(
        '-p',
        '--prgenv',
        action='append',
        default=[r'.*'],
        help='Select tests for PRGENV programming environment only')
    select_options.add_argument('--gpu-only',
                                action='store_true',
                                help='Select only GPU tests')
    select_options.add_argument('--cpu-only',
                                action='store_true',
                                help='Select only CPU tests')

    # Action options
    action_options.add_argument('-l',
                                '--list',
                                action='store_true',
                                help='List matched regression checks')
    action_options.add_argument(
        '-L',
        '--list-detailed',
        action='store_true',
        help='List matched regression checks with a detailed description')
    action_options.add_argument('-r',
                                '--run',
                                action='store_true',
                                help='Run regression with the selected checks')

    # Run options
    run_options.add_argument('-A',
                             '--account',
                             action='store',
                             help='Use ACCOUNT for submitting jobs')
    run_options.add_argument('-P',
                             '--partition',
                             action='store',
                             metavar='PART',
                             help='Use PART for submitting jobs')
    run_options.add_argument('--reservation',
                             action='store',
                             metavar='RES',
                             help='Use RES for submitting jobs')
    run_options.add_argument('--nodelist',
                             action='store',
                             help='Run checks on the selected list of nodes')
    run_options.add_argument(
        '--exclude-nodes',
        action='store',
        metavar='NODELIST',
        help='Exclude the list of nodes from running checks')
    run_options.add_argument('--job-option',
                             action='append',
                             metavar='OPT',
                             dest='job_options',
                             default=[],
                             help='Pass OPT to job scheduler')
    run_options.add_argument('--force-local',
                             action='store_true',
                             help='Force local execution of checks')
    run_options.add_argument('--skip-sanity-check',
                             action='store_true',
                             help='Skip sanity checking')
    run_options.add_argument('--skip-performance-check',
                             action='store_true',
                             help='Skip performance checking')
    run_options.add_argument('--strict',
                             action='store_true',
                             help='Force strict performance checking')
    run_options.add_argument('--skip-system-check',
                             action='store_true',
                             help='Skip system check')
    run_options.add_argument('--skip-prgenv-check',
                             action='store_true',
                             help='Skip prog. environment check')
    run_options.add_argument(
        '--exec-policy',
        metavar='POLICY',
        action='store',
        choices=['async', 'serial'],
        default='async',
        help='Specify the execution policy for running the regression tests. '
        'Available policies: "async" (default), "serial"')
    run_options.add_argument('--mode',
                             action='store',
                             help='Execution mode to use')
    run_options.add_argument(
        '--max-retries',
        metavar='NUM',
        action='store',
        default=0,
        help='Specify the maximum number of times a failed regression test '
        'may be retried (default: 0)')
    run_options.add_argument(
        '--flex-alloc-tasks',
        action='store',
        dest='flex_alloc_tasks',
        metavar='{all|idle|NUM}',
        default=None,
        help='*deprecated*, please use --flex-alloc-nodes instead')
    run_options.add_argument(
        '--flex-alloc-nodes',
        action='store',
        dest='flex_alloc_nodes',
        metavar='{all|idle|NUM}',
        default=None,
        help="Strategy for flexible node allocation (default: 'idle').")

    env_options.add_argument('-M',
                             '--map-module',
                             action='append',
                             metavar='MAPPING',
                             dest='module_mappings',
                             default=[],
                             help='Apply a single module mapping',
                             envvar='RFM_MODULE_MAPPINGS ,',
                             configvar='general/module_mappings')
    env_options.add_argument(
        '-m',
        '--module',
        action='append',
        default=[],
        metavar='MOD',
        dest='user_modules',
        help='Load module MOD before running the regression suite',
        envvar='RFM_USER_MODULES',
        configvar='general/user_modules')
    env_options.add_argument('--module-mappings',
                             action='store',
                             metavar='FILE',
                             dest='module_map_file',
                             help='Apply module mappings defined in FILE',
                             envvar='RFM_MODULE_MAP_FILE',
                             configvar='general/module_map_file')
    env_options.add_argument(
        '-u',
        '--unload-module',
        action='append',
        metavar='MOD',
        dest='unload_modules',
        default=[],
        help='Unload module MOD before running the regression suite',
        envvar='RFM_UNLOAD_MODULES',
        configvar='general/unload_modules')
    env_options.add_argument(
        '--purge-env',
        action='store_true',
        dest='purge_env',
        default=False,
        help='Purge environment before running the regression suite',
        envvar='RFM_PURGE_ENVIRONMENT',
        configvar='general/purge_environment')
    env_options.add_argument('--non-default-craype',
                             action='store_true',
                             help='Test a non-default Cray PE',
                             envvar='RFM_NON_DEFAULT_CRAYPE',
                             configvar='general/non_default_craype')

    # Miscellaneous options
    misc_options.add_argument('-C',
                              '--config-file',
                              action='store',
                              dest='config_file',
                              metavar='FILE',
                              help='ReFrame configuration file to use',
                              envvar='RFM_CONFIG_FILE')
    misc_options.add_argument('--nocolor',
                              action='store_false',
                              dest='colorize',
                              help='Disable coloring of output',
                              envvar='RFM_COLORIZE',
                              configvar='general/colorize')
    misc_options.add_argument('--failure-stats',
                              action='store_true',
                              help='Print failure statistics')
    misc_options.add_argument('--performance-report',
                              action='store_true',
                              help='Print a report for performance tests run')
    misc_options.add_argument('--show-config-param',
                              action='store',
                              nargs='?',
                              const='all',
                              metavar='PARAM',
                              help=('Print how parameter PARAM is configured '
                                    'for the current system and exit'))
    misc_options.add_argument('--system',
                              action='store',
                              help='Load configuration for SYSTEM',
                              envvar='RFM_SYSTEM')
    misc_options.add_argument(
        '--timestamp',
        action='store',
        nargs='?',
        const='',
        metavar='TIMEFMT',
        help=('Append a timestamp component to the various '
              'ReFrame directories (default format: "%%FT%%T")'),
        envvar='RFM_TIMESTAMP_DIRS',
        configvar='general/timestamp_dirs')
    misc_options.add_argument('-V',
                              '--version',
                              action='version',
                              version=os_ext.reframe_version())
    misc_options.add_argument('-v',
                              '--verbose',
                              action='count',
                              help='Increase verbosity level of output',
                              envvar='RFM_VERBOSE',
                              configvar='general/verbose')

    # Options not associated with command-line arguments
    argparser.add_argument(
        dest='graylog_server',
        envvar='RFM_GRAYLOG_SERVER',
        configvar='logging/handlers_perflog/graylog_address',
        help='Graylog server address')

    if len(sys.argv) == 1:
        argparser.print_help()
        sys.exit(1)

    # Parse command line
    options = argparser.parse_args()

    # First configure logging with our generic configuration so as to be able
    # to print pretty messages; logging will be reconfigured by user's
    # configuration later
    site_config = config.load_config(
        os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py'))
    site_config.select_subconfig('generic')
    options.update_config(site_config)
    logging.configure_logging(site_config)
    logging.getlogger().colorize = site_config.get('general/0/colorize')
    printer = PrettyPrinter()
    printer.colorize = site_config.get('general/0/colorize')
    if options.verbose:
        printer.inc_verbosity(options.verbose)

    # Now configure ReFrame according to the user configuration file
    try:
        try:
            site_config = config.load_config(options.config_file)
        except ReframeDeprecationWarning as e:
            printer.warning(e)
            converted = config.convert_old_config(options.config_file)
            printer.warning(f"configuration file has been converted "
                            f"to the new syntax here: '{converted}'")
            site_config = config.load_config(converted)

        site_config.validate()
        site_config.select_subconfig(options.system)
        for err in options.update_config(site_config):
            printer.warning(str(err))

        logging.configure_logging(site_config)
    except (OSError, ConfigError) as e:
        printer.error(f'failed to load configuration: {e}')
        sys.exit(1)

    logging.getlogger().colorize = site_config.get('general/0/colorize')
    printer.colorize = site_config.get('general/0/colorize')
    try:
        runtime.init_runtime(site_config)
    except ConfigError as e:
        printer.error(f'failed to initialize runtime: {e}')
        sys.exit(1)

    rt = runtime.runtime()
    try:
        if site_config.get('general/0/module_map_file'):
            rt.modules_system.load_mapping_from_file(
                site_config.get('general/0/module_map_file'))

        if site_config.get('general/0/module_mappings'):
            for m in site_config.get('general/0/module_mappings'):
                rt.modules_system.load_mapping(m)

    except (ConfigError, OSError) as e:
        printer.error('could not load module mappings: %s' % e)
        sys.exit(1)

    if options.mode:
        try:
            mode_args = rt.get_option(f'modes/@{options.mode}/options')

            # Parse the mode's options and reparse the command-line
            options = argparser.parse_args(mode_args)
            options = argparser.parse_args(namespace=options.cmd_options)
            options.update_config(rt.site_config)
        except ConfigError as e:
            printer.error('could not obtain execution mode: %s' % e)
            sys.exit(1)

    if (os_ext.samefile(rt.stage_prefix, rt.output_prefix)
            and not site_config.get('general/0/keep_stage_files')):
        printer.error("stage and output refer to the same directory; "
                      "if this is on purpose, please use the "
                      "'--keep-stage-files' option.")
        sys.exit(1)

    # Show configuration after everything is set up
    if options.show_config_param:
        config_param = options.show_config_param
        if config_param == 'all':
            printer.info(str(rt.site_config))
        else:
            value = rt.get_option(config_param)
            if value is None:
                printer.error(
                    f'no such configuration parameter found: {config_param}')
            else:
                printer.info(json.dumps(value, indent=2))

        sys.exit(0)

    # Setup the check loader
    loader = RegressionCheckLoader(
        load_path=site_config.get('general/0/check_search_path'),
        recurse=site_config.get('general/0/check_search_recursive'),
        ignore_conflicts=site_config.get('general/0/ignore_check_conflicts'))
    printer.debug(argparse.format_options(options))

    def print_infoline(param, value):
        param = param + ':'
        printer.info(f"  {param.ljust(18)} {value}")

    # Print command line
    printer.info(f"[ReFrame Setup]")
    print_infoline('version', os_ext.reframe_version())
    print_infoline('command', repr(' '.join(sys.argv)))
    print_infoline('launched by',
                   f"{os_ext.osuser() or '<unknown>'}@{socket.gethostname()}")
    print_infoline('working directory', repr(os.getcwd()))
    print_infoline(
        'check search path', f"{'(R) ' if loader.recurse else ''}"
        f"{':'.join(loader.load_path)!r}")
    print_infoline('stage directory', repr(rt.stage_prefix))
    print_infoline('output directory', repr(rt.output_prefix))
    printer.info('')
    try:
        # Locate and load checks
        try:
            checks_found = loader.load_all()
        except OSError as e:
            raise ReframeError from e

        # Filter checks by name
        checks_matched = checks_found
        if options.exclude_names:
            for name in options.exclude_names:
                checks_matched = filter(filters.have_not_name(name),
                                        checks_matched)

        if options.names:
            checks_matched = filter(filters.have_name('|'.join(options.names)),
                                    checks_matched)

        # Filter checks by tags
        for tag in options.tags:
            checks_matched = filter(filters.have_tag(tag), checks_matched)

        # Filter checks by prgenv
        if not options.skip_prgenv_check:
            for prgenv in options.prgenv:
                checks_matched = filter(filters.have_prgenv(prgenv),
                                        checks_matched)

        # Filter checks by system
        if not options.skip_system_check:
            checks_matched = filter(
                filters.have_partition(rt.system.partitions), checks_matched)

        # Filter checks further
        if options.gpu_only and options.cpu_only:
            printer.error("options `--gpu-only' and `--cpu-only' "
                          "are mutually exclusive")
            sys.exit(1)

        if options.gpu_only:
            checks_matched = filter(filters.have_gpu_only(), checks_matched)
        elif options.cpu_only:
            checks_matched = filter(filters.have_cpu_only(), checks_matched)

        # Determine the allowed programming environments
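        # (i.e., every environment, across all partitions of the current
        # system, whose name matches at least one -p/--prgenv pattern)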
        allowed_environs = {
            e.name
            for env_patt in options.prgenv for p in rt.system.partitions
            for e in p.environs if re.match(env_patt, e.name)
        }

        # Generate the test cases, validate dependencies and sort them
        checks_matched = list(checks_matched)
        testcases = generate_testcases(checks_matched,
                                       options.skip_system_check,
                                       options.skip_prgenv_check,
                                       allowed_environs)
        testgraph = dependency.build_deps(testcases)
        dependency.validate_deps(testgraph)
        testcases = dependency.toposort(testgraph)

        # Manipulate ReFrame's environment
        if site_config.get('general/0/purge_environment'):
            rt.modules_system.unload_all()
        else:
            for m in site_config.get('general/0/unload_modules'):
                rt.modules_system.unload_module(m)

        # Load the environment for the current system
        try:
            runtime.loadenv(rt.system.preload_environ)
        except EnvironError as e:
            printer.error("failed to load current system's environment; "
                          "please check your configuration")
            printer.debug(str(e))
            raise

        for m in site_config.get('general/0/user_modules'):
            try:
                rt.modules_system.load_module(m, force=True)
            except EnvironError as e:
                printer.warning("could not load module '%s' correctly: "
                                "Skipping..." % m)
                printer.debug(str(e))

        if options.flex_alloc_tasks:
            printer.warning("`--flex-alloc-tasks' is deprecated and "
                            "will be removed in the future; "
                            "you should use --flex-alloc-nodes instead")
            options.flex_alloc_nodes = (options.flex_alloc_nodes
                                        or options.flex_alloc_tasks)

        options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'

        # Act on checks
        success = True
        if options.list:
            # List matched checks
            list_checks(list(checks_matched), printer)
        elif options.list_detailed:
            # List matched checks with details
            list_checks(list(checks_matched), printer, detailed=True)

        elif options.run:
            # Setup the execution policy
            if options.exec_policy == 'serial':
                exec_policy = SerialExecutionPolicy()
            elif options.exec_policy == 'async':
                exec_policy = AsynchronousExecutionPolicy()
            else:
                # This should not happen, since choices are handled by
                # argparser
                printer.error("unknown execution policy `%s': Exiting...")
                sys.exit(1)

            exec_policy.skip_system_check = options.skip_system_check
            exec_policy.force_local = options.force_local
            exec_policy.strict_check = options.strict
            exec_policy.skip_sanity_check = options.skip_sanity_check
            exec_policy.skip_performance_check = options.skip_performance_check
            exec_policy.keep_stage_files = site_config.get(
                'general/0/keep_stage_files')
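            # Interpret --flex-alloc-nodes: a positive integer requests a
            # fixed number of nodes; otherwise only the node states 'idle'
            # and 'all' are accepted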
            try:
                errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
                sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
                if sched_flex_alloc_nodes <= 0:
                    raise ConfigError(errmsg.format(options.flex_alloc_nodes))
            except ValueError:
                if options.flex_alloc_nodes.casefold() not in {'idle', 'all'}:
                    raise ConfigError(errmsg.format(
                        options.flex_alloc_nodes)) from None

                sched_flex_alloc_nodes = options.flex_alloc_nodes

            exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
            exec_policy.flex_alloc_nodes = options.flex_alloc_nodes
            exec_policy.sched_account = options.account
            exec_policy.sched_partition = options.partition
            exec_policy.sched_reservation = options.reservation
            exec_policy.sched_nodelist = options.nodelist
            exec_policy.sched_exclude_nodelist = options.exclude_nodes
            exec_policy.sched_options = options.job_options
            try:
                max_retries = int(options.max_retries)
            except ValueError:
                raise ConfigError('--max-retries is not a valid integer: %s' %
                                  options.max_retries) from None
            runner = Runner(exec_policy, printer, max_retries)
            try:
                runner.runall(testcases)
            finally:
                # Print a retry report if we did any retries
                if runner.stats.failures(run=0):
                    printer.info(runner.stats.retry_report())

                # Print a failure report if we had failures in the last run
                if runner.stats.failures():
                    printer.info(runner.stats.failure_report())
                    success = False
                    if options.failure_stats:
                        printer.info(runner.stats.failure_stats())

                if options.performance_report:
                    printer.info(runner.stats.performance_report())

        else:
            printer.error("No action specified. Please specify `-l'/`-L' for "
                          "listing or `-r' for running. "
                          "Try `%s -h' for more options." % argparser.prog)
            sys.exit(1)

        if not success:
            sys.exit(1)

        sys.exit(0)

    except KeyboardInterrupt:
        sys.exit(1)
    except ReframeError as e:
        printer.error(str(e))
        sys.exit(1)
    except (Exception, ReframeFatalError):
        printer.error(format_exception(*sys.exc_info()))
        sys.exit(1)
    finally:
        try:
            if site_config.get('general/0/save_log_files'):
                logging.save_log_files(rt.output_prefix)

        except OSError as e:
            printer.error('could not save log file: %s' % e)
            sys.exit(1)
Exemplo n.º 23
0
def loader_with_path():
    return RegressionCheckLoader(
        ['unittests/resources/checks', 'unittests/foobar'],
        ignore_conflicts=True)
Exemplo n.º 24
0
def loader():
    return RegressionCheckLoader(['.'], ignore_conflicts=True)
Exemplo n.º 25
0
def main():
    # Setup command line options
    argparser = argparse.ArgumentParser()
    output_options = argparser.add_argument_group(
        'Options controlling ReFrame output')
    locate_options = argparser.add_argument_group(
        'Options for discovering checks')
    select_options = argparser.add_argument_group(
        'Options for selecting checks')
    action_options = argparser.add_argument_group(
        'Options controlling actions')
    run_options = argparser.add_argument_group(
        'Options controlling the execution of checks')
    env_options = argparser.add_argument_group(
        'Options controlling the ReFrame environment')
    misc_options = argparser.add_argument_group('Miscellaneous options')

    # Output directory options
    output_options.add_argument('--prefix',
                                action='store',
                                metavar='DIR',
                                help='Set general directory prefix to DIR',
                                envvar='RFM_PREFIX',
                                configvar='systems/prefix')
    output_options.add_argument('-o',
                                '--output',
                                action='store',
                                metavar='DIR',
                                help='Set output directory prefix to DIR',
                                envvar='RFM_OUTPUT_DIR',
                                configvar='systems/outputdir')
    output_options.add_argument('-s',
                                '--stage',
                                action='store',
                                metavar='DIR',
                                help='Set stage directory prefix to DIR',
                                envvar='RFM_STAGE_DIR',
                                configvar='systems/stagedir')
    output_options.add_argument(
        '--timestamp',
        action='store',
        nargs='?',
        const='',
        metavar='TIMEFMT',
        help=('Append a timestamp to the output and stage directory prefixes '
              '(default: "%%FT%%T")'),
        envvar='RFM_TIMESTAMP_DIRS',
        configvar='general/timestamp_dirs')
    output_options.add_argument(
        '--perflogdir',
        action='store',
        metavar='DIR',
        help=('Set performance log data directory prefix '
              '(relevant only to the filelog log handler)'),
        envvar='RFM_PERFLOG_DIR',
        configvar='logging/handlers_perflog/filelog_basedir')
    output_options.add_argument(
        '--keep-stage-files',
        action='store_true',
        help='Keep stage directories even for successful checks',
        envvar='RFM_KEEP_STAGE_FILES',
        configvar='general/keep_stage_files')
    output_options.add_argument('--dont-restage',
                                action='store_false',
                                dest='clean_stagedir',
                                help='Reuse the test stage directory',
                                envvar='RFM_CLEAN_STAGEDIR',
                                configvar='general/clean_stagedir')
    output_options.add_argument(
        '--save-log-files',
        action='store_true',
        default=False,
        help='Save ReFrame log files to the output directory',
        envvar='RFM_SAVE_LOG_FILES',
        configvar='general/save_log_files')
    output_options.add_argument('--report-file',
                                action='store',
                                metavar='FILE',
                                help="Store JSON run report in FILE",
                                envvar='RFM_REPORT_FILE',
                                configvar='general/report_file')

    # Check discovery options
    locate_options.add_argument('-c',
                                '--checkpath',
                                action='append',
                                metavar='PATH',
                                help="Add PATH to the check search path list",
                                envvar='RFM_CHECK_SEARCH_PATH :',
                                configvar='general/check_search_path')
    locate_options.add_argument(
        '-R',
        '--recursive',
        action='store_true',
        help='Search for checks in the search path recursively',
        envvar='RFM_CHECK_SEARCH_RECURSIVE',
        configvar='general/check_search_recursive')
    locate_options.add_argument('--ignore-check-conflicts',
                                action='store_true',
                                help='Skip checks with conflicting names',
                                envvar='RFM_IGNORE_CHECK_CONFLICTS',
                                configvar='general/ignore_check_conflicts')

    # Select options
    select_options.add_argument(
        '-t',
        '--tag',
        action='append',
        dest='tags',
        metavar='PATTERN',
        default=[],
        help='Select checks with at least one tag matching PATTERN')
    select_options.add_argument(
        '-n',
        '--name',
        action='append',
        dest='names',
        default=[],
        metavar='PATTERN',
        help='Select checks whose name matches PATTERN')
    select_options.add_argument(
        '-x',
        '--exclude',
        action='append',
        dest='exclude_names',
        metavar='PATTERN',
        default=[],
        help='Exclude checks whose name matches PATTERN')
    select_options.add_argument(
        '-p',
        '--prgenv',
        action='append',
        default=[r'.*'],
        metavar='PATTERN',
        help=('Select checks with at least one '
              'programming environment matching PATTERN'))
    select_options.add_argument('--gpu-only',
                                action='store_true',
                                help='Select only GPU checks')
    select_options.add_argument('--cpu-only',
                                action='store_true',
                                help='Select only CPU checks')

    # Action options
    action_options.add_argument('-l',
                                '--list',
                                action='store_true',
                                help='List the selected checks')
    action_options.add_argument(
        '-L',
        '--list-detailed',
        action='store_true',
        help='List the selected checks providing details for each test')
    action_options.add_argument('-r',
                                '--run',
                                action='store_true',
                                help='Run the selected checks')

    # Run options
    run_options.add_argument('-J',
                             '--job-option',
                             action='append',
                             metavar='OPT',
                             dest='job_options',
                             default=[],
                             help='Pass option OPT to job scheduler')
    run_options.add_argument('--force-local',
                             action='store_true',
                             help='Force local execution of checks')
    run_options.add_argument('--skip-sanity-check',
                             action='store_true',
                             help='Skip sanity checking')
    run_options.add_argument('--skip-performance-check',
                             action='store_true',
                             help='Skip performance checking')
    run_options.add_argument('--strict',
                             action='store_true',
                             help='Enforce strict performance checking')
    run_options.add_argument('--skip-system-check',
                             action='store_true',
                             help='Skip system check')
    run_options.add_argument('--skip-prgenv-check',
                             action='store_true',
                             help='Skip programming environment check')
    run_options.add_argument(
        '--exec-policy',
        metavar='POLICY',
        action='store',
        choices=['async', 'serial'],
        default='async',
        help='Set the execution policy of ReFrame (default: "async")')
    run_options.add_argument('--mode',
                             action='store',
                             help='Execution mode to use')
    run_options.add_argument(
        '--max-retries',
        metavar='NUM',
        action='store',
        default=0,
        help='Set the maximum number of times a failed regression test '
        'may be retried (default: 0)')
    run_options.add_argument(
        '--flex-alloc-nodes',
        action='store',
        dest='flex_alloc_nodes',
        metavar='{all|STATE|NUM}',
        default=None,
        help='Set strategy for the flexible node allocation (default: "idle").'
    )
    run_options.add_argument('--disable-hook',
                             action='append',
                             metavar='NAME',
                             dest='hooks',
                             default=[],
                             help='Disable a pipeline hook for this run')
    env_options.add_argument('-M',
                             '--map-module',
                             action='append',
                             metavar='MAPPING',
                             dest='module_mappings',
                             default=[],
                             help='Add a module mapping',
                             envvar='RFM_MODULE_MAPPINGS ,',
                             configvar='general/module_mappings')
    env_options.add_argument(
        '-m',
        '--module',
        action='append',
        default=[],
        metavar='MOD',
        dest='user_modules',
        help='Load module MOD before running any regression check',
        envvar='RFM_USER_MODULES ,',
        configvar='general/user_modules')
    env_options.add_argument('--module-mappings',
                             action='store',
                             metavar='FILE',
                             dest='module_map_file',
                             help='Load module mappings from FILE',
                             envvar='RFM_MODULE_MAP_FILE',
                             configvar='general/module_map_file')
    env_options.add_argument(
        '-u',
        '--unload-module',
        action='append',
        metavar='MOD',
        dest='unload_modules',
        default=[],
        help='Unload module MOD before running any regression check',
        envvar='RFM_UNLOAD_MODULES ,',
        configvar='general/unload_modules')
    env_options.add_argument(
        '--purge-env',
        action='store_true',
        dest='purge_env',
        default=False,
        help='Unload all modules before running any regression check',
        envvar='RFM_PURGE_ENVIRONMENT',
        configvar='general/purge_environment')
    env_options.add_argument(
        '--non-default-craype',
        action='store_true',
        help='Test a non-default Cray Programming Environment',
        envvar='RFM_NON_DEFAULT_CRAYPE',
        configvar='general/non_default_craype')

    # Miscellaneous options
    misc_options.add_argument('-C',
                              '--config-file',
                              action='store',
                              dest='config_file',
                              metavar='FILE',
                              help='Set configuration file',
                              envvar='RFM_CONFIG_FILE')
    misc_options.add_argument('--nocolor',
                              action='store_false',
                              dest='colorize',
                              help='Disable coloring of output',
                              envvar='RFM_COLORIZE',
                              configvar='general/colorize')
    misc_options.add_argument('--failure-stats',
                              action='store_true',
                              help='Print failure statistics')
    misc_options.add_argument('--performance-report',
                              action='store_true',
                              help='Print a report for performance tests')
    misc_options.add_argument(
        '--show-config',
        action='store',
        nargs='?',
        const='all',
        metavar='PARAM',
        help='Print the value of configuration parameter PARAM and exit')
    misc_options.add_argument('--system',
                              action='store',
                              help='Load configuration for SYSTEM',
                              envvar='RFM_SYSTEM')
    misc_options.add_argument(
        '--upgrade-config-file',
        action='store',
        metavar='OLD[:NEW]',
        help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax')
    misc_options.add_argument('-V',
                              '--version',
                              action='version',
                              version=os_ext.reframe_version())
    misc_options.add_argument('-v',
                              '--verbose',
                              action='count',
                              help='Increase verbosity level of output',
                              envvar='RFM_VERBOSE',
                              configvar='general/verbose')

    # Options not associated with command-line arguments
    argparser.add_argument(
        dest='graylog_server',
        envvar='RFM_GRAYLOG_ADDRESS',
        configvar='logging/handlers_perflog/graylog_address',
        help='Graylog server address')
    argparser.add_argument(dest='syslog_address',
                           envvar='RFM_SYSLOG_ADDRESS',
                           configvar='logging/handlers_perflog/syslog_address',
                           help='Syslog server address')
    argparser.add_argument(dest='ignore_reqnodenotavail',
                           envvar='RFM_IGNORE_REQNODENOTAVAIL',
                           configvar='schedulers/ignore_reqnodenotavail',
                           action='store_true',
                           help='Ignore the ReqNodeNotAvail Slurm state')
    argparser.add_argument(dest='use_login_shell',
                           envvar='RFM_USE_LOGIN_SHELL',
                           configvar='general/use_login_shell',
                           action='store_true',
                           help='Use a login shell for job scripts')

    if len(sys.argv) == 1:
        argparser.print_help()
        sys.exit(1)

    # Parse command line
    options = argparser.parse_args()

    # First configure logging with our generic configuration so as to be able
    # to print pretty messages; logging will be reconfigured by user's
    # configuration later
    site_config = config.load_config(
        os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py'))
    site_config.select_subconfig('generic')
    options.update_config(site_config)
    logging.configure_logging(site_config)
    logging.getlogger().colorize = site_config.get('general/0/colorize')
    printer = PrettyPrinter()
    printer.colorize = site_config.get('general/0/colorize')
    printer.inc_verbosity(site_config.get('general/0/verbose'))
    if os.getenv('RFM_GRAYLOG_SERVER'):
        printer.warning(
            'RFM_GRAYLOG_SERVER environment variable is deprecated; '
            'please use RFM_GRAYLOG_ADDRESS instead')
        os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')

    if options.upgrade_config_file is not None:
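        # Split the OLD[:NEW] argument; if NEW is omitted, the converter
        # decides where to write the converted file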
        old_config, *new_config = options.upgrade_config_file.split(':',
                                                                    maxsplit=1)
        new_config = new_config[0] if new_config else None

        try:
            new_config = config.convert_old_config(old_config, new_config)
        except Exception as e:
            printer.error(f'could not convert file: {e}')
            sys.exit(1)

        printer.info(f'Conversion successful! '
                     f'The converted file can be found at {new_config!r}.')

        sys.exit(0)

    # Now configure ReFrame according to the user configuration file
    try:
        try:
            site_config = config.load_config(options.config_file)
        except ReframeDeprecationWarning as e:
            printer.warning(e)
            converted = config.convert_old_config(options.config_file)
            printer.warning(f"configuration file has been converted "
                            f"to the new syntax here: '{converted}'")
            site_config = config.load_config(converted)

        site_config.validate()

        # We ignore errors about unresolved sections or configuration
        # parameters here, because they might be defined at the individual
        # partition level and will be caught when we instantiate the system
        # and partitions internally later on.
        site_config.select_subconfig(options.system,
                                     ignore_resolve_errors=True)
        for err in options.update_config(site_config):
            printer.warning(str(err))

        # Update options from the selected execution mode
        if options.mode:
            mode_args = site_config.get(f'modes/@{options.mode}/options')

            # Parse the mode's options and reparse the command-line
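            # (command-line options take precedence over the mode's options)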
            options = argparser.parse_args(mode_args)
            options = argparser.parse_args(namespace=options.cmd_options)
            options.update_config(site_config)

        logging.configure_logging(site_config)
    except (OSError, ConfigError) as e:
        printer.error(f'failed to load configuration: {e}')
        sys.exit(1)

    logging.getlogger().colorize = site_config.get('general/0/colorize')
    printer.colorize = site_config.get('general/0/colorize')
    printer.inc_verbosity(site_config.get('general/0/verbose'))
    try:
        runtime.init_runtime(site_config)
    except ConfigError as e:
        printer.error(f'failed to initialize runtime: {e}')
        sys.exit(1)

    rt = runtime.runtime()
    try:
        if site_config.get('general/0/module_map_file'):
            rt.modules_system.load_mapping_from_file(
                site_config.get('general/0/module_map_file'))

        if site_config.get('general/0/module_mappings'):
            for m in site_config.get('general/0/module_mappings'):
                rt.modules_system.load_mapping(m)

    except (ConfigError, OSError) as e:
        printer.error('could not load module mappings: %s' % e)
        sys.exit(1)

    if (os_ext.samefile(rt.stage_prefix, rt.output_prefix)
            and not site_config.get('general/0/keep_stage_files')):
        printer.error("stage and output refer to the same directory; "
                      "if this is on purpose, please use the "
                      "'--keep-stage-files' option.")
        sys.exit(1)

    # Show configuration after everything is set up
    if options.show_config:
        config_param = options.show_config
        if config_param == 'all':
            printer.info(str(rt.site_config))
        else:
            value = rt.get_option(config_param)
            if value is None:
                printer.error(
                    f'no such configuration parameter found: {config_param}')
            else:
                printer.info(json.dumps(value, indent=2))

        sys.exit(0)

    printer.debug(format_env(options.env_vars))

    # Setup the check loader
    loader = RegressionCheckLoader(
        load_path=site_config.get('general/0/check_search_path'),
        recurse=site_config.get('general/0/check_search_recursive'),
        ignore_conflicts=site_config.get('general/0/ignore_check_conflicts'))

    def print_infoline(param, value):
        param = param + ':'
        printer.info(f"  {param.ljust(18)} {value}")

    session_info = {
        'cmdline': ' '.join(sys.argv),
        'config_file': rt.site_config.filename,
        'data_version': '1.0',
        'hostname': socket.gethostname(),
        'prefix_output': rt.output_prefix,
        'prefix_stage': rt.stage_prefix,
        'user': os_ext.osuser(),
        'version': os_ext.reframe_version(),
        'workdir': os.getcwd(),
    }

    # Print command line
    printer.info(f"[ReFrame Setup]")
    print_infoline('version', session_info['version'])
    print_infoline('command', repr(session_info['cmdline']))
    print_infoline(
        f"launched by",
        f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}")
    print_infoline('working directory', repr(session_info['workdir']))
    print_infoline('settings file', f"{session_info['config_file']!r}")
    print_infoline(
        'check search path', f"{'(R) ' if loader.recurse else ''}"
        f"{':'.join(loader.load_path)!r}")
    print_infoline('stage directory', repr(session_info['prefix_stage']))
    print_infoline('output directory', repr(session_info['prefix_output']))
    printer.info('')
    try:
        # Locate and load checks
        try:
            checks_found = loader.load_all()
        except OSError as e:
            raise ReframeError from e

        # Filter checks by name
        checks_matched = checks_found
        if options.exclude_names:
            for name in options.exclude_names:
                checks_matched = filter(filters.have_not_name(name),
                                        checks_matched)

        if options.names:
            checks_matched = filter(filters.have_name('|'.join(options.names)),
                                    checks_matched)

        # Filter checks by tags
        for tag in options.tags:
            checks_matched = filter(filters.have_tag(tag), checks_matched)

        # Filter checks by prgenv
        if not options.skip_prgenv_check:
            for prgenv in options.prgenv:
                checks_matched = filter(filters.have_prgenv(prgenv),
                                        checks_matched)

        # Filter checks by system
        if not options.skip_system_check:
            checks_matched = filter(
                filters.have_partition(rt.system.partitions), checks_matched)

        # Filter checks further
        if options.gpu_only and options.cpu_only:
            printer.error("options `--gpu-only' and `--cpu-only' "
                          "are mutually exclusive")
            sys.exit(1)

        if options.gpu_only:
            checks_matched = filter(filters.have_gpu_only(), checks_matched)
        elif options.cpu_only:
            checks_matched = filter(filters.have_cpu_only(), checks_matched)

        # Determine the allowed programming environments
        allowed_environs = {
            e.name
            for env_patt in options.prgenv for p in rt.system.partitions
            for e in p.environs if re.match(env_patt, e.name)
        }

        # Generate the test cases, validate dependencies and sort them
        checks_matched = list(checks_matched)

        # Disable hooks
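        # (hooks requested with --disable-hook are disabled on the check's
        # class for this run)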
        for c in checks_matched:
            for h in options.hooks:
                type(c).disable_hook(h)

        testcases = generate_testcases(checks_matched,
                                       options.skip_system_check,
                                       options.skip_prgenv_check,
                                       allowed_environs)
        testgraph = dependency.build_deps(testcases)
        dependency.validate_deps(testgraph)
        testcases = dependency.toposort(testgraph)

        # Manipulate ReFrame's environment
        if site_config.get('general/0/purge_environment'):
            rt.modules_system.unload_all()
        else:
            for m in site_config.get('general/0/unload_modules'):
                rt.modules_system.unload_module(m)

        # Load the environment for the current system
        try:
            runtime.loadenv(rt.system.preload_environ)
        except EnvironError as e:
            printer.error("failed to load current system's environment; "
                          "please check your configuration")
            printer.debug(str(e))
            raise

        for m in site_config.get('general/0/user_modules'):
            try:
                rt.modules_system.load_module(m, force=True)
            except EnvironError as e:
                printer.warning("could not load module '%s' correctly: "
                                "Skipping..." % m)
                printer.debug(str(e))

        options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'

        # Act on checks
        success = True
        if options.list or options.list_detailed:
            list_checks(list(checks_matched), printer, options.list_detailed)
        elif options.run:
            # Setup the execution policy
            if options.exec_policy == 'serial':
                exec_policy = SerialExecutionPolicy()
            elif options.exec_policy == 'async':
                exec_policy = AsynchronousExecutionPolicy()
            else:
                # This should not happen, since choices are handled by
                # argparser
                printer.error("unknown execution policy `%s': Exiting...")
                sys.exit(1)

            exec_policy.skip_system_check = options.skip_system_check
            exec_policy.force_local = options.force_local
            exec_policy.strict_check = options.strict
            exec_policy.skip_sanity_check = options.skip_sanity_check
            exec_policy.skip_performance_check = options.skip_performance_check
            exec_policy.keep_stage_files = site_config.get(
                'general/0/keep_stage_files')
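            # Interpret --flex-alloc-nodes: a positive integer requests a
            # fixed number of nodes; any other value is treated as a node
            # state name (e.g., 'idle')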
            try:
                errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
                sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
                if sched_flex_alloc_nodes <= 0:
                    raise ConfigError(errmsg.format(options.flex_alloc_nodes))
            except ValueError:
                sched_flex_alloc_nodes = options.flex_alloc_nodes

            exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
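            # Normalize --job-option values: values already starting with '-'
            # or '#' are passed verbatim; single-character values get a '-'
            # prefix and longer ones a '--' prefix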
            parsed_job_options = []
            for opt in options.job_options:
                if opt.startswith('-') or opt.startswith('#'):
                    parsed_job_options.append(opt)
                elif len(opt) == 1:
                    parsed_job_options.append(f'-{opt}')
                else:
                    parsed_job_options.append(f'--{opt}')

            exec_policy.sched_options = parsed_job_options
            try:
                max_retries = int(options.max_retries)
            except ValueError:
                raise ConfigError('--max-retries is not a valid integer: %s' %
                                  options.max_retries) from None
            runner = Runner(exec_policy, printer, max_retries)
            try:
                time_start = time.time()
                session_info['time_start'] = time.strftime(
                    '%FT%T%z',
                    time.localtime(time_start),
                )
                runner.runall(testcases)
            finally:
                time_end = time.time()
                session_info['time_end'] = time.strftime(
                    '%FT%T%z', time.localtime(time_end))
                session_info['time_elapsed'] = time_end - time_start

                # Print a retry report if we did any retries
                if runner.stats.failures(run=0):
                    printer.info(runner.stats.retry_report())

                # Print a failure report if we had failures in the last run
                if runner.stats.failures():
                    printer.info(runner.stats.failure_report())
                    success = False
                    if options.failure_stats:
                        printer.info(runner.stats.failure_stats())

                if options.performance_report:
                    printer.info(runner.stats.performance_report())

                # Generate the report for this session
                report_file = os.path.normpath(
                    os_ext.expandvars(rt.get_option('general/0/report_file')))
                basedir = os.path.dirname(report_file)
                if basedir:
                    os.makedirs(basedir, exist_ok=True)

                # Build final JSON report
                run_stats = runner.stats.json()
                session_info.update({
                    'num_cases': run_stats[0]['num_cases'],
                    'num_failures': run_stats[-1]['num_failures']
                })
                json_report = {'session_info': session_info, 'runs': run_stats}
                report_file = generate_report_filename(report_file)
                try:
                    with open(report_file, 'w') as fp:
                        jsonext.dump(json_report, fp, indent=2)
                except OSError as e:
                    printer.warning(
                        f'failed to generate report in {report_file!r}: {e}')

        else:
            printer.error("No action specified. Please specify `-l'/`-L' for "
                          "listing or `-r' for running. "
                          "Try `%s -h' for more options." % argparser.prog)
            sys.exit(1)

        if not success:
            sys.exit(1)

        sys.exit(0)

    except KeyboardInterrupt:
        sys.exit(1)
    except ReframeError as e:
        printer.error(str(e))
        sys.exit(1)
    except (Exception, ReframeFatalError):
        printer.error(format_exception(*sys.exc_info()))
        sys.exit(1)
    finally:
        try:
            log_files = logging.log_files()
            if site_config.get('general/0/save_log_files'):
                log_files = logging.save_log_files(rt.output_prefix)

        except OSError as e:
            printer.error('could not save log file: %s' % e)
            sys.exit(1)
        finally:
            if not log_files:
                msg = '<no log file was generated>'
            else:
                msg = ', '.join(repr(f) for f in log_files)

            printer.info(f'Log file(s) saved in: {msg}')
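A note on the report handling above: the file written by jsonext.dump() should be loadable as plain JSON, so it can be post-processed with the standard library. The following is a minimal, hypothetical sketch; the path and the printed keys simply mirror the json_report structure assembled in the snippet above.

import json

# Hypothetical path; in practice this is the value returned by
# generate_report_filename() in the snippet above
report_file = 'reframe_report.json'

with open(report_file) as fp:
    report = json.load(fp)

# 'session_info' and 'runs' mirror the json_report dict assembled above
print('elapsed time:', report['session_info']['time_elapsed'])
print('failures in last run:', report['runs'][-1]['num_failures'])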
Example no. 26
0
def loader():
    return RegressionCheckLoader([
        'unittests/resources/checks_unlisted/deps_simple.py'
    ])
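The factory above can be turned into a pytest fixture with a one-line decorator. A minimal sketch, assuming pytest is available and the same RegressionCheckLoader import as in the snippet above; the decorator and the test function are illustrative additions, not part of the original snippet.

import pytest


@pytest.fixture
def loader():
    # Same factory as above, exposed as a pytest fixture (hypothetical wiring)
    return RegressionCheckLoader([
        'unittests/resources/checks_unlisted/deps_simple.py'
    ])


def test_load_simple_deps(loader):
    # load_all() is called without arguments elsewhere in this document
    checks = loader.load_all()
    assert len(checks) > 0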
Example no. 27
0
def main():
    # Setup command line options
    argparser = argparse.ArgumentParser()
    output_options = argparser.add_argument_group('Options controlling output')
    locate_options = argparser.add_argument_group(
        'Options for locating checks')
    select_options = argparser.add_argument_group(
        'Options for selecting checks')
    action_options = argparser.add_argument_group(
        'Options controlling actions')
    run_options = argparser.add_argument_group(
        'Options controlling execution of checks')
    env_options = argparser.add_argument_group(
        'Options controlling environment')
    misc_options = argparser.add_argument_group('Miscellaneous options')

    # Output directory options
    output_options.add_argument('--prefix',
                                action='store',
                                metavar='DIR',
                                help='Set output directory prefix to DIR')
    output_options.add_argument('-o',
                                '--output',
                                action='store',
                                metavar='DIR',
                                help='Set output directory to DIR')
    output_options.add_argument('-s',
                                '--stage',
                                action='store',
                                metavar='DIR',
                                help='Set stage directory to DIR')
    output_options.add_argument(
        '--perflogdir',
        action='store',
        metavar='DIR',
        help='Set directory prefix for the performance logs '
        '(default: ${prefix}/perflogs, '
        'relevant only if the filelog backend is used)')
    output_options.add_argument(
        '--keep-stage-files',
        action='store_true',
        help='Keep stage directory even if check is successful')
    output_options.add_argument(
        '--save-log-files',
        action='store_true',
        default=False,
        help='Copy the log file from the work dir to the output dir at the '
        'end of the program')

    # Check discovery options
    locate_options.add_argument('-c',
                                '--checkpath',
                                action='append',
                                metavar='DIR|FILE',
                                help='Search for checks in DIR or FILE')
    locate_options.add_argument('-R',
                                '--recursive',
                                action='store_true',
                                help='Load checks recursively')
    locate_options.add_argument('--ignore-check-conflicts',
                                action='store_true',
                                help='Skip checks with conflicting names')

    # Select options
    select_options.add_argument('-t',
                                '--tag',
                                action='append',
                                dest='tags',
                                default=[],
                                help='Select checks matching TAG')
    select_options.add_argument('-n',
                                '--name',
                                action='append',
                                dest='names',
                                default=[],
                                metavar='NAME',
                                help='Select checks with NAME')
    select_options.add_argument('-x',
                                '--exclude',
                                action='append',
                                dest='exclude_names',
                                metavar='NAME',
                                default=[],
                                help='Exclude checks with NAME')
    select_options.add_argument(
        '-p',
        '--prgenv',
        action='append',
        default=[r'.*'],
        help='Select tests for PRGENV programming environment only')
    select_options.add_argument('--gpu-only',
                                action='store_true',
                                help='Select only GPU tests')
    select_options.add_argument('--cpu-only',
                                action='store_true',
                                help='Select only CPU tests')

    # Action options
    action_options.add_argument('-l',
                                '--list',
                                action='store_true',
                                help='List matched regression checks')
    action_options.add_argument(
        '-L',
        '--list-detailed',
        action='store_true',
        help='List matched regression checks with a detailed description')
    action_options.add_argument('-r',
                                '--run',
                                action='store_true',
                                help='Run regression with the selected checks')

    # Run options
    run_options.add_argument('-A',
                             '--account',
                             action='store',
                             help='Use ACCOUNT for submitting jobs')
    run_options.add_argument('-P',
                             '--partition',
                             action='store',
                             metavar='PART',
                             help='Use PART for submitting jobs')
    run_options.add_argument('--reservation',
                             action='store',
                             metavar='RES',
                             help='Use RES for submitting jobs')
    run_options.add_argument('--nodelist',
                             action='store',
                             help='Run checks on the selected list of nodes')
    run_options.add_argument(
        '--exclude-nodes',
        action='store',
        metavar='NODELIST',
        help='Exclude the list of nodes from running checks')
    run_options.add_argument('--job-option',
                             action='append',
                             metavar='OPT',
                             dest='job_options',
                             default=[],
                             help='Pass OPT to job scheduler')
    run_options.add_argument('--force-local',
                             action='store_true',
                             help='Force local execution of checks')
    run_options.add_argument('--skip-sanity-check',
                             action='store_true',
                             help='Skip sanity checking')
    run_options.add_argument('--skip-performance-check',
                             action='store_true',
                             help='Skip performance checking')
    run_options.add_argument('--strict',
                             action='store_true',
                             help='Force strict performance checking')
    run_options.add_argument('--skip-system-check',
                             action='store_true',
                             help='Skip system check')
    run_options.add_argument('--skip-prgenv-check',
                             action='store_true',
                             help='Skip prog. environment check')
    run_options.add_argument(
        '--exec-policy',
        metavar='POLICY',
        action='store',
        choices=['serial', 'async'],
        default='serial',
        help='Specify the execution policy for running the regression tests. '
        'Available policies: "serial" (default), "async"')
    run_options.add_argument('--mode',
                             action='store',
                             help='Execution mode to use')
    run_options.add_argument(
        '--max-retries',
        metavar='NUM',
        action='store',
        default=0,
        help='Specify the maximum number of times a failed regression test '
        'may be retried (default: 0)')
    run_options.add_argument(
        '--flex-alloc-tasks',
        action='store',
        dest='flex_alloc_tasks',
        metavar='{all|idle|NUM}',
        default='idle',
        help="Strategy for flexible task allocation (default: 'idle').")

    env_options.add_argument('-M',
                             '--map-module',
                             action='append',
                             metavar='MAPPING',
                             dest='module_mappings',
                             default=[],
                             help='Apply a single module mapping')
    env_options.add_argument(
        '-m',
        '--module',
        action='append',
        default=[],
        metavar='MOD',
        dest='user_modules',
        help='Load module MOD before running the regression suite')
    env_options.add_argument('--module-mappings',
                             action='store',
                             metavar='FILE',
                             dest='module_map_file',
                             help='Apply module mappings defined in FILE')
    env_options.add_argument(
        '-u',
        '--unload-module',
        action='append',
        metavar='MOD',
        dest='unload_modules',
        default=[],
        help='Unload module MOD before running the regression suite')
    env_options.add_argument(
        '--purge-env',
        action='store_true',
        dest='purge_env',
        default=False,
        help='Purge environment before running the regression suite')

    # Miscellaneous options
    misc_options.add_argument(
        '-C',
        '--config-file',
        action='store',
        dest='config_file',
        metavar='FILE',
        default=os.path.join(reframe.INSTALL_PREFIX, 'reframe/settings.py'),
        help='Specify a custom config-file for the machine '
        '(default: %s)' %
        os.path.join(reframe.INSTALL_PREFIX, 'reframe/settings.py'))
    misc_options.add_argument('--nocolor',
                              action='store_false',
                              dest='colorize',
                              default=True,
                              help='Disable coloring of output')
    misc_options.add_argument('--performance-report',
                              action='store_true',
                              help='Print the performance report')

    # FIXME: This should move to env_options as soon as
    # https://github.com/eth-cscs/reframe/pull/946 is merged
    misc_options.add_argument('--non-default-craype',
                              action='store_true',
                              default=False,
                              help='Test a non-default Cray PE')
    misc_options.add_argument(
        '--show-config',
        action='store_true',
        help='Print configuration of the current system and exit')
    misc_options.add_argument(
        '--show-config-env',
        action='store',
        metavar='ENV',
        help='Print configuration of environment ENV and exit')
    misc_options.add_argument('--system',
                              action='store',
                              help='Load SYSTEM configuration explicitly')
    misc_options.add_argument(
        '--timestamp',
        action='store',
        nargs='?',
        const='%FT%T',
        metavar='TIMEFMT',
        help='Append a timestamp component to the regression directories '
        '(default format "%%FT%%T")')
    misc_options.add_argument('-V',
                              '--version',
                              action='version',
                              version=reframe.VERSION)
    misc_options.add_argument('-v',
                              '--verbose',
                              action='count',
                              default=0,
                              help='Increase verbosity level of output')

    if len(sys.argv) == 1:
        argparser.print_help()
        sys.exit(1)

    # Parse command line
    options = argparser.parse_args()

    # Load configuration
    try:
        settings = config.load_settings_from_file(options.config_file)
    except (OSError, ReframeError) as e:
        sys.stderr.write('%s: could not load settings: %s\n' %
                         (sys.argv[0], e))
        sys.exit(1)

    # Configure logging
    try:
        logging.configure_logging(settings.logging_config)
    except (OSError, ConfigError) as e:
        sys.stderr.write('could not configure logging: %s\n' % e)
        sys.exit(1)

    # Set colors in logger
    logging.getlogger().colorize = options.colorize

    # Setup printer
    printer = PrettyPrinter()
    printer.colorize = options.colorize
    if options.verbose:
        printer.inc_verbosity(options.verbose)

    try:
        runtime.init_runtime(settings.site_configuration,
                             options.system,
                             non_default_craype=options.non_default_craype)
    except SystemAutodetectionError:
        printer.warning(
            'could not find a configuration entry for the current system; '
            'falling back to a generic system configuration; '
            'please check the online documentation on how to configure '
            'ReFrame for your system.')
        settings.site_configuration['systems'] = {
            'generic': {
                'descr': 'Generic fallback system configuration',
                'hostnames': ['localhost'],
                'partitions': {
                    'login': {
                        'scheduler': 'local',
                        'environs': ['builtin-gcc'],
                        'descr': 'Login nodes'
                    }
                }
            }
        }
        settings.site_configuration['environments'] = {
            '*': {
                'builtin-gcc': {
                    'type': 'ProgEnvironment',
                    'cc': 'gcc',
                    'cxx': 'g++',
                    'ftn': 'gfortran',
                }
            }
        }
        runtime.init_runtime(settings.site_configuration,
                             'generic',
                             non_default_craype=options.non_default_craype)
    except Exception as e:
        printer.error('configuration error: %s' % e)
        printer.verbose(''.join(traceback.format_exception(*sys.exc_info())))
        sys.exit(1)

    rt = runtime.runtime()
    try:
        if options.module_map_file:
            rt.modules_system.load_mapping_from_file(options.module_map_file)

        if options.module_mappings:
            for m in options.module_mappings:
                rt.modules_system.load_mapping(m)

    except (ConfigError, OSError) as e:
        printer.error('could not load module mappings: %s' % e)
        sys.exit(1)

    if options.mode:
        try:
            mode_args = rt.mode(options.mode)

            # Parse the mode's options first and then reparse the command
            # line into the same namespace, so that options given explicitly
            # on the command line override the mode's values (see the
            # standalone sketch after this example)
            options = argparser.parse_args(mode_args)
            options = argparser.parse_args(namespace=options)
        except ConfigError as e:
            printer.error('could not obtain execution mode: %s' % e)
            sys.exit(1)

    # Adjust system directories
    if options.prefix:
        # if prefix is set, reset all other directories
        rt.resources.prefix = os_ext.expandvars(options.prefix)
        rt.resources.outputdir = None
        rt.resources.stagedir = None

    if options.output:
        rt.resources.outputdir = os_ext.expandvars(options.output)

    if options.stage:
        rt.resources.stagedir = os_ext.expandvars(options.stage)

    if (os_ext.samefile(rt.resources.stage_prefix, rt.resources.output_prefix)
            and not options.keep_stage_files):
        printer.error('stage and output refer to the same directory; '
                      'if this is on purpose, please also use the '
                      "`--keep-stage-files' option.")
        sys.exit(1)

    if options.timestamp:
        rt.resources.timefmt = options.timestamp

    # Configure performance logging
    # NOTE: we need resources to be configured in order to set the global
    # perf. logging prefix correctly
    if options.perflogdir:
        rt.resources.perflogdir = os_ext.expandvars(options.perflogdir)

    logging.LOG_CONFIG_OPTS['handlers.filelog.prefix'] = (
        rt.resources.perflog_prefix)

    # Show configuration after everything is set up
    if options.show_config:
        printer.info(rt.show_config())
        sys.exit(0)

    if options.show_config_env:
        envname = options.show_config_env
        for p in rt.system.partitions:
            environ = p.environment(envname)
            if environ:
                break

        if environ is None:
            printer.error('no such environment: ' + envname)
            sys.exit(1)

        printer.info(environ.details())
        sys.exit(0)

    if hasattr(settings, 'perf_logging_config'):
        try:
            logging.configure_perflogging(settings.perf_logging_config)
        except (OSError, ConfigError) as e:
            printer.error('could not configure performance logging: %s\n' % e)
            sys.exit(1)
    else:
        printer.warning('no performance logging is configured; '
                        'please check the documentation')

    # Setup the check loader
    if options.checkpath:
        load_path = []
        for d in options.checkpath:
            d = os_ext.expandvars(d)
            if not os.path.exists(d):
                printer.warning("%s: path `%s' does not exist. Skipping..." %
                                (argparser.prog, d))
                continue

            load_path.append(d)

        loader = RegressionCheckLoader(
            load_path,
            recurse=options.recursive,
            ignore_conflicts=options.ignore_check_conflicts)
    else:
        loader = RegressionCheckLoader(load_path=settings.checks_path,
                                       prefix=reframe.INSTALL_PREFIX,
                                       recurse=settings.checks_path_recurse)

    printer.debug(argparse.format_options(options))

    # Print command line
    printer.info('Command line: %s' % ' '.join(sys.argv))
    printer.info('Reframe version: ' + reframe.VERSION)
    printer.info('Launched by user: ' + (os_ext.osuser() or '<unknown>'))
    printer.info('Launched on host: ' + socket.gethostname())

    # Print important paths
    printer.info('Reframe paths')
    printer.info('=============')
    printer.info('    Check prefix      : %s' % loader.prefix)
    printer.info(
        '%03s Check search path : %s' %
        ('(R)' if loader.recurse else '', "'%s'" % ':'.join(loader.load_path)))
    printer.info('    Stage dir prefix     : %s' % rt.resources.stage_prefix)
    printer.info('    Output dir prefix    : %s' % rt.resources.output_prefix)
    printer.info(
        '    Perf. logging prefix : %s' %
        os.path.abspath(logging.LOG_CONFIG_OPTS['handlers.filelog.prefix']))
    try:
        # Locate and load checks
        try:
            checks_found = loader.load_all()
        except OSError as e:
            raise ReframeError from e

        # Filter checks by name
        checks_matched = checks_found
        if options.exclude_names:
            for name in options.exclude_names:
                checks_matched = filter(filters.have_not_name(name),
                                        checks_matched)

        if options.names:
            checks_matched = filter(filters.have_name('|'.join(options.names)),
                                    checks_matched)

        # Filter checks by tags
        for tag in options.tags:
            checks_matched = filter(filters.have_tag(tag), checks_matched)

        # Filter checks by prgenv
        if not options.skip_prgenv_check:
            for prgenv in options.prgenv:
                checks_matched = filter(filters.have_prgenv(prgenv),
                                        checks_matched)

        # Filter checks by system
        if not options.skip_system_check:
            checks_matched = filter(
                filters.have_partition(rt.system.partitions), checks_matched)

        # Filter checks further
        if options.gpu_only and options.cpu_only:
            printer.error("options `--gpu-only' and `--cpu-only' "
                          "are mutually exclusive")
            sys.exit(1)

        if options.gpu_only:
            checks_matched = filter(filters.have_gpu_only(), checks_matched)
        elif options.cpu_only:
            checks_matched = filter(filters.have_cpu_only(), checks_matched)

        # Determine the allowed programming environments
        allowed_environs = {
            e.name
            for env_patt in options.prgenv for p in rt.system.partitions
            for e in p.environs if re.match(env_patt, e.name)
        }

        # Generate the test cases, validate dependencies and sort them
        checks_matched = list(checks_matched)
        testcases = generate_testcases(checks_matched,
                                       options.skip_system_check,
                                       options.skip_prgenv_check,
                                       allowed_environs)
        testgraph = dependency.build_deps(testcases)
        dependency.validate_deps(testgraph)
        testcases = dependency.toposort(testgraph)

        # Unload regression's module and load user-specified modules
        if hasattr(settings, 'reframe_module'):
            printer.warning(
                "the 'reframe_module' configuration option will be ignored; "
                "please use the '-u' or '--unload-module' options")

        if options.purge_env:
            rt.modules_system.unload_all()
        else:
            for m in options.unload_modules:
                rt.modules_system.unload_module(m)

        # Load the environment for the current system
        try:
            env.load(rt.system.preload_environ)
        except EnvironError as e:
            printer.error("failed to load current system's environment; "
                          "please check your configuration")
            printer.debug(str(e))
            raise

        for m in options.user_modules:
            try:
                rt.modules_system.load_module(m, force=True)
            except EnvironError as e:
                printer.warning("could not load module '%s' correctly: "
                                "Skipping..." % m)
                printer.debug(str(e))

        # Act on checks

        success = True
        if options.list:
            # List matched checks
            list_checks(list(checks_matched), printer)
        elif options.list_detailed:
            # List matched checks with details
            list_checks(list(checks_matched), printer, detailed=True)

        elif options.run:
            # Setup the execution policy
            if options.exec_policy == 'serial':
                exec_policy = SerialExecutionPolicy()
            elif options.exec_policy == 'async':
                exec_policy = AsynchronousExecutionPolicy()
            else:
                # This should not happen, since choices are handled by
                # argparser
                printer.error("unknown execution policy `%s': Exiting..." %
                              options.exec_policy)
                sys.exit(1)

            exec_policy.skip_system_check = options.skip_system_check
            exec_policy.force_local = options.force_local
            exec_policy.strict_check = options.strict
            exec_policy.skip_sanity_check = options.skip_sanity_check
            exec_policy.skip_performance_check = options.skip_performance_check
            exec_policy.keep_stage_files = options.keep_stage_files
            try:
                errmsg = "invalid option for --flex-alloc-tasks: '{0}'"
                sched_flex_alloc_tasks = int(options.flex_alloc_tasks)
                if sched_flex_alloc_tasks <= 0:
                    raise ConfigError(errmsg.format(options.flex_alloc_tasks))
            except ValueError:
                if options.flex_alloc_tasks.lower() not in {'idle', 'all'}:
                    raise ConfigError(errmsg.format(
                        options.flex_alloc_tasks)) from None

                sched_flex_alloc_tasks = options.flex_alloc_tasks

            exec_policy.sched_flex_alloc_tasks = sched_flex_alloc_tasks
            exec_policy.flex_alloc_tasks = options.flex_alloc_tasks
            exec_policy.sched_account = options.account
            exec_policy.sched_partition = options.partition
            exec_policy.sched_reservation = options.reservation
            exec_policy.sched_nodelist = options.nodelist
            exec_policy.sched_exclude_nodelist = options.exclude_nodes
            exec_policy.sched_options = options.job_options
            try:
                max_retries = int(options.max_retries)
            except ValueError:
                raise ConfigError('--max-retries is not a valid integer: %s' %
                                  options.max_retries) from None
            runner = Runner(exec_policy, printer, max_retries)
            try:
                runner.runall(testcases)
            finally:
                # Print a retry report if we did any retries
                if runner.stats.failures(run=0):
                    printer.info(runner.stats.retry_report())

                # Print a failure report if we had failures in the last run
                if runner.stats.failures():
                    printer.info(runner.stats.failure_report())
                    success = False

                if options.performance_report:
                    printer.info(runner.stats.performance_report())

        else:
            printer.info('No action specified. Exiting...')
            printer.info("Try `%s -h' for a list of available actions." %
                         argparser.prog)
            sys.exit(1)

        if not success:
            sys.exit(1)

        sys.exit(0)

    except KeyboardInterrupt:
        sys.exit(1)
    except ReframeError as e:
        printer.error(str(e))
        sys.exit(1)
    except (Exception, ReframeFatalError):
        printer.error(format_exception(*sys.exc_info()))
        sys.exit(1)
    finally:
        try:
            if options.save_log_files:
                logging.save_log_files(rt.resources.output_prefix)

        except OSError as e:
            printer.error('could not save log file: %s' % e)
            sys.exit(1)
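The --mode handling above relies on a double-parse trick: the mode's canned arguments are parsed first and the real command line is then parsed into the same namespace, so flags given explicitly on the command line win. A standalone sketch of that pattern with plain argparse (not ReFrame code; the option names are illustrative only):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--exec-policy', default='serial')
parser.add_argument('--max-retries', type=int, default=0)

# Values stored in an execution mode
mode_args = ['--exec-policy', 'async', '--max-retries', '2']
options = parser.parse_args(mode_args)

# Re-parse sys.argv into the same namespace: attributes already set by the
# mode are kept, unless the corresponding option appears on the command line
options = parser.parse_args(namespace=options)
print(options.exec_policy, options.max_retries)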
Example no. 28
0
class TestSerialExecutionPolicy(unittest.TestCase):
    def setUp(self):
        self.loader = RegressionCheckLoader(['unittests/resources/checks'],
                                            ignore_conflicts=True)

        # Setup the runner
        self.runner = executors.Runner(policies.SerialExecutionPolicy())
        self.checks = self.loader.load_all()

        # Set runtime prefix
        rt.runtime().resources.prefix = tempfile.mkdtemp(dir='unittests')

        # Reset current_run
        rt.runtime()._current_run = 0

    def tearDown(self):
        os_ext.rmtree(rt.runtime().resources.prefix)

    def runall(self, checks, sort=False, *args, **kwargs):
        cases = executors.generate_testcases(checks, *args, **kwargs)
        if sort:
            depgraph = dependency.build_deps(cases)
            dependency.validate_deps(depgraph)
            cases = dependency.toposort(depgraph)

        self.runner.runall(cases)

    def assertRunall(self):
        # Make sure that all cases finished or failed
        for t in self.runner.stats.tasks():
            assert t.succeeded or t.failed

    def _num_failures_stage(self, stage):
        stats = self.runner.stats
        return len([t for t in stats.failures() if t.failed_stage == stage])

    def assert_all_dead(self):
        for t in self.runner.stats.tasks():
            try:
                finished = t.check.poll()
            except JobNotStartedError:
                finished = True

            assert finished

    def test_runall(self):
        self.runall(self.checks)

        stats = self.runner.stats
        assert 8 == stats.num_cases()
        self.assertRunall()
        assert 5 == len(stats.failures())
        assert 2 == self._num_failures_stage('setup')
        assert 1 == self._num_failures_stage('sanity')
        assert 1 == self._num_failures_stage('performance')
        assert 1 == self._num_failures_stage('cleanup')

    def test_runall_skip_system_check(self):
        self.runall(self.checks, skip_system_check=True)

        stats = self.runner.stats
        assert 9 == stats.num_cases()
        self.assertRunall()
        assert 5 == len(stats.failures())
        assert 2 == self._num_failures_stage('setup')
        assert 1 == self._num_failures_stage('sanity')
        assert 1 == self._num_failures_stage('performance')
        assert 1 == self._num_failures_stage('cleanup')

    def test_runall_skip_prgenv_check(self):
        self.runall(self.checks, skip_environ_check=True)

        stats = self.runner.stats
        assert 9 == stats.num_cases()
        self.assertRunall()
        assert 5 == len(stats.failures())
        assert 2 == self._num_failures_stage('setup')
        assert 1 == self._num_failures_stage('sanity')
        assert 1 == self._num_failures_stage('performance')
        assert 1 == self._num_failures_stage('cleanup')

    def test_runall_skip_sanity_check(self):
        self.runner.policy.skip_sanity_check = True
        self.runall(self.checks)

        stats = self.runner.stats
        assert 8 == stats.num_cases()
        self.assertRunall()
        assert 4 == len(stats.failures())
        assert 2 == self._num_failures_stage('setup')
        assert 0 == self._num_failures_stage('sanity')
        assert 1 == self._num_failures_stage('performance')
        assert 1 == self._num_failures_stage('cleanup')

    def test_runall_skip_performance_check(self):
        self.runner.policy.skip_performance_check = True
        self.runall(self.checks)

        stats = self.runner.stats
        assert 8 == stats.num_cases()
        self.assertRunall()
        assert 4 == len(stats.failures())
        assert 2 == self._num_failures_stage('setup')
        assert 1 == self._num_failures_stage('sanity')
        assert 0 == self._num_failures_stage('performance')
        assert 1 == self._num_failures_stage('cleanup')

    def test_strict_performance_check(self):
        self.runner.policy.strict_check = True
        self.runall(self.checks)

        stats = self.runner.stats
        assert 8 == stats.num_cases()
        self.assertRunall()
        assert 6 == len(stats.failures())
        assert 2 == self._num_failures_stage('setup')
        assert 1 == self._num_failures_stage('sanity')
        assert 2 == self._num_failures_stage('performance')
        assert 1 == self._num_failures_stage('cleanup')

    def test_force_local_execution(self):
        self.runner.policy.force_local = True
        self.runall([HelloTest()])
        self.assertRunall()
        stats = self.runner.stats
        for t in stats.tasks():
            assert t.check.local

    def test_kbd_interrupt_within_test(self):
        check = KeyboardInterruptCheck()
        with pytest.raises(KeyboardInterrupt):
            self.runall([check])

        stats = self.runner.stats
        assert 1 == len(stats.failures())
        self.assert_all_dead()

    def test_system_exit_within_test(self):
        check = SystemExitCheck()

        # This should not raise and should not exit
        self.runall([check])
        stats = self.runner.stats
        assert 1 == len(stats.failures())

    def test_retries_bad_check(self):
        max_retries = 2
        checks = [BadSetupCheck(), BadSetupCheckEarly()]
        self.runner._max_retries = max_retries
        self.runall(checks)

        # Ensure that the test was retried #max_retries times and failed.
        assert 2 == self.runner.stats.num_cases()
        self.assertRunall()
        assert max_retries == rt.runtime().current_run
        assert 2 == len(self.runner.stats.failures())

        # Ensure that the report does not raise any exception.
        self.runner.stats.retry_report()

    def test_retries_good_check(self):
        max_retries = 2
        checks = [HelloTest()]
        self.runner._max_retries = max_retries
        self.runall(checks)

        # Ensure that the test passed without retries.
        assert 1 == self.runner.stats.num_cases()
        self.assertRunall()
        assert 0 == rt.runtime().current_run
        assert 0 == len(self.runner.stats.failures())

    def test_pass_in_retries(self):
        max_retries = 3
        run_to_pass = 2
        # Create a file containing the current_run; Run 0 will set it to 0,
        # run 1 to 1 and so on.
        with tempfile.NamedTemporaryFile(mode='wt', delete=False) as fp:
            fp.write('0\n')

        checks = [RetriesCheck(run_to_pass, fp.name)]
        self.runner._max_retries = max_retries
        self.runall(checks)

        # Ensure that the test passed after retries in run #run_to_pass.
        assert 1 == self.runner.stats.num_cases()
        self.assertRunall()
        assert 1 == len(self.runner.stats.failures(run=0))
        assert run_to_pass == rt.runtime().current_run
        assert 0 == len(self.runner.stats.failures())
        os.remove(fp.name)

    def test_dependencies(self):
        self.loader = RegressionCheckLoader(
            ['unittests/resources/checks_unlisted/deps_complex.py'])

        # Setup the runner
        self.checks = self.loader.load_all()
        self.runall(self.checks, sort=True)

        self.assertRunall()
        stats = self.runner.stats
        assert stats.num_cases(0) == 10
        assert len(stats.failures()) == 4
        for tf in stats.failures():
            check = tf.testcase.check
            _, exc_value, _ = tf.exc_info
            if check.name == 'T7' or check.name == 'T9':
                assert isinstance(exc_value, TaskDependencyError)

        # Check that cleanup is executed properly for successful tests as well
        for t in stats.tasks():
            check = t.testcase.check
            if t.failed:
                continue

            if t.ref_count == 0:
                assert os.path.exists(os.path.join(check.outputdir, 'out.txt'))

    def test_sigterm(self):
        # Wrapper around self.runall that runs in a child process and passes
        # any exception, the number of cases and the number of failures back
        # to the parent process
        def _runall(checks, ns):
            exc = None
            try:
                self.runall(checks)
            except BaseException as e:
                exc = e
            finally:
                ns.exc = exc
                ns.num_cases = self.runner.stats.num_cases()
                ns.num_failures = len(self.runner.stats.failures())

        with multiprocessing.Manager() as manager:
            ns = manager.Namespace()
            p = multiprocessing.Process(target=_runall,
                                        args=([SleepCheck(20)], ns))

            p.start()

            # Allow some time for the SleepCheck to be submitted; its sleep
            # time is much larger, to ensure that it does not finish before
            # the termination signal is sent
            time.sleep(0.2)
            p.terminate()
            p.join()

            # Either the test is submitted and it fails due to the termination
            # or it is not yet submitted when the termination signal is sent
            assert (ns.num_cases, ns.num_failures) in {(1, 1), (0, 0)}
            with pytest.raises(ReframeForceExitError,
                               match='received TERM signal'):
                if ns.exc:
                    raise ns.exc

    def test_dependencies_with_retries(self):
        self.runner._max_retries = 2
        self.test_dependencies()
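test_sigterm above uses a pattern worth noting on its own: run the work in a child process, share the outcome through a Manager namespace and terminate the child from the parent. A standalone, simplified sketch of that pattern (not ReFrame code; names and timings are illustrative):

import multiprocessing
import time


def _work(ns):
    ns.started = True
    time.sleep(20)       # long enough to outlive the parent's wait
    ns.finished = True   # never reached if the parent terminates the child


if __name__ == '__main__':
    with multiprocessing.Manager() as manager:
        ns = manager.Namespace()
        ns.started = False
        ns.finished = False
        p = multiprocessing.Process(target=_work, args=(ns,))
        p.start()
        time.sleep(0.2)  # give the child some time to start
        p.terminate()
        p.join()
        print('child started:', ns.started)
        assert not ns.finished  # the child was killed before it could finish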
Example no. 29
0
class TestSerialExecutionPolicy(unittest.TestCase):
    def setUp(self):
        self.loader = RegressionCheckLoader(['unittests/resources/checks'],
                                            ignore_conflicts=True)

        # Setup the runner
        self.runner = executors.Runner(policies.SerialExecutionPolicy())
        self.checks = self.loader.load_all()

        # Set runtime prefix
        rt.runtime().resources.prefix = tempfile.mkdtemp(dir='unittests')

        # Reset current_run
        rt.runtime()._current_run = 0

    def tearDown(self):
        os_ext.rmtree(rt.runtime().resources.prefix)

    def runall(self, checks, sort=False, *args, **kwargs):
        cases = executors.generate_testcases(checks, *args, **kwargs)
        if sort:
            depgraph = dependency.build_deps(cases)
            dependency.validate_deps(depgraph)
            cases = dependency.toposort(depgraph)

        self.runner.runall(cases)

    def _num_failures_stage(self, stage):
        stats = self.runner.stats
        return len([t for t in stats.failures() if t.failed_stage == stage])

    def assert_all_dead(self):
        for t in self.runner.stats.tasks():
            try:
                finished = t.check.poll()
            except JobNotStartedError:
                finished = True

            self.assertTrue(finished)

    def test_runall(self):
        self.runall(self.checks)

        stats = self.runner.stats
        self.assertEqual(7, stats.num_cases())
        self.assertEqual(4, len(stats.failures()))
        self.assertEqual(2, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(1, self._num_failures_stage('performance'))

    def test_runall_skip_system_check(self):
        self.runall(self.checks, skip_system_check=True)

        stats = self.runner.stats
        self.assertEqual(8, stats.num_cases())
        self.assertEqual(4, len(stats.failures()))
        self.assertEqual(2, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(1, self._num_failures_stage('performance'))

    def test_runall_skip_prgenv_check(self):
        self.runall(self.checks, skip_environ_check=True)

        stats = self.runner.stats
        self.assertEqual(8, stats.num_cases())
        self.assertEqual(4, len(stats.failures()))
        self.assertEqual(2, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(1, self._num_failures_stage('performance'))

    def test_runall_skip_sanity_check(self):
        self.runner.policy.skip_sanity_check = True
        self.runall(self.checks)

        stats = self.runner.stats
        self.assertEqual(7, stats.num_cases())
        self.assertEqual(3, len(stats.failures()))
        self.assertEqual(2, self._num_failures_stage('setup'))
        self.assertEqual(0, self._num_failures_stage('sanity'))
        self.assertEqual(1, self._num_failures_stage('performance'))

    def test_runall_skip_performance_check(self):
        self.runner.policy.skip_performance_check = True
        self.runall(self.checks)

        stats = self.runner.stats
        self.assertEqual(7, stats.num_cases())
        self.assertEqual(3, len(stats.failures()))
        self.assertEqual(2, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(0, self._num_failures_stage('performance'))

    def test_strict_performance_check(self):
        self.runner.policy.strict_check = True
        self.runall(self.checks)

        stats = self.runner.stats
        self.assertEqual(7, stats.num_cases())
        self.assertEqual(5, len(stats.failures()))
        self.assertEqual(2, self._num_failures_stage('setup'))
        self.assertEqual(1, self._num_failures_stage('sanity'))
        self.assertEqual(2, self._num_failures_stage('performance'))

    def test_force_local_execution(self):
        self.runner.policy.force_local = True
        self.runall([HelloTest()])
        stats = self.runner.stats
        for t in stats.tasks():
            self.assertTrue(t.check.local)

    def test_kbd_interrupt_within_test(self):
        check = KeyboardInterruptCheck()
        self.assertRaises(KeyboardInterrupt, self.runall, [check])
        stats = self.runner.stats
        self.assertEqual(1, len(stats.failures()))
        self.assert_all_dead()

    def test_system_exit_within_test(self):
        check = SystemExitCheck()

        # This should not raise and should not exit
        self.runall([check])
        stats = self.runner.stats
        self.assertEqual(1, len(stats.failures()))

    def test_retries_bad_check(self):
        max_retries = 2
        checks = [BadSetupCheck(), BadSetupCheckEarly()]
        self.runner._max_retries = max_retries
        self.runall(checks)

        # Ensure that the test was retried #max_retries times and failed.
        self.assertEqual(2, self.runner.stats.num_cases())
        self.assertEqual(max_retries, rt.runtime().current_run)
        self.assertEqual(2, len(self.runner.stats.failures()))

        # Ensure that the report does not raise any exception.
        self.runner.stats.retry_report()

    def test_retries_good_check(self):
        max_retries = 2
        checks = [HelloTest()]
        self.runner._max_retries = max_retries
        self.runall(checks)

        # Ensure that the test passed without retries.
        self.assertEqual(1, self.runner.stats.num_cases())
        self.assertEqual(0, rt.runtime().current_run)
        self.assertEqual(0, len(self.runner.stats.failures()))

    def test_pass_in_retries(self):
        max_retries = 3
        run_to_pass = 2
        # Create a file containing the current_run; Run 0 will set it to 0,
        # run 1 to 1 and so on.
        with tempfile.NamedTemporaryFile(mode='wt', delete=False) as fp:
            fp.write('0\n')

        checks = [RetriesCheck(run_to_pass, fp.name)]
        self.runner._max_retries = max_retries
        self.runall(checks)

        # Ensure that the test passed after retries in run #run_to_pass.
        self.assertEqual(1, self.runner.stats.num_cases())
        self.assertEqual(1, len(self.runner.stats.failures(run=0)))
        self.assertEqual(run_to_pass, rt.runtime().current_run)
        self.assertEqual(0, len(self.runner.stats.failures()))
        os.remove(fp.name)

    def test_dependencies(self):
        self.loader = RegressionCheckLoader(
            ['unittests/resources/checks_unlisted/deps_complex.py'])

        # Setup the runner
        self.checks = self.loader.load_all()
        self.runall(self.checks, sort=True)

        stats = self.runner.stats
        assert stats.num_cases(0) == 8
        assert len(stats.failures()) == 2
        for tf in stats.failures():
            check = tf.testcase.check
            exc_type, exc_value, _ = tf.exc_info
            if check.name == 'T7':
                assert isinstance(exc_value, TaskDependencyError)

        # Check that cleanup is executed properly for successful tests as well
        for t in stats.tasks():
            check = t.testcase.check
            if t.failed:
                continue

            if t.ref_count == 0:
                assert os.path.exists(os.path.join(check.outputdir, 'out.txt'))

    def test_dependencies_with_retries(self):
        self.runner._max_retries = 2
        self.test_dependencies()
Example no. 30
0
def _make_loader(check_search_path):
    return RegressionCheckLoader(check_search_path)
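A brief usage sketch for the helper above; the search path and the assertion are hypothetical and only illustrate the expected call pattern, assuming the argument-less load_all() signature used elsewhere in this document.

# Hypothetical usage: build a loader over a search path and load the checks
loader = _make_loader(['unittests/resources/checks'])
checks = loader.load_all()
assert all(hasattr(c, 'name') for c in checks)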