Example #1
    def test_toposort_subgraph(self):
        #
        #       t0
        #       ^
        #       |
        #   +-->t1<--+
        #   |        |
        #   t2<------t3
        #   ^        ^
        #   |        |
        #   +---t4---+
        #
        t0 = self.create_test('t0')
        t1 = self.create_test('t1')
        t2 = self.create_test('t2')
        t3 = self.create_test('t3')
        t4 = self.create_test('t4')
        t1.depends_on('t0')
        t2.depends_on('t1')
        t3.depends_on('t1')
        t3.depends_on('t2')
        t4.depends_on('t2')
        t4.depends_on('t3')
        full_deps = dependency.build_deps(
            executors.generate_testcases([t0, t1, t2, t3, t4]))
        partial_deps = dependency.build_deps(
            executors.generate_testcases([t3, t4]), full_deps)
        cases = dependency.toposort(partial_deps, is_subgraph=True)
        self.assert_topological_order(cases, partial_deps)
Example #2
def test_prune_deps(default_exec_ctx):
    #
    #       t0       +-->t5<--+
    #       ^        |        |
    #       |        |        |
    #   +-->t1<--+   t6       t7
    #   |        |            ^
    #   t2<------t3           |
    #   ^        ^            |
    #   |        |            t8
    #   +---t4---+
    #
    t0 = make_test('t0')
    t1 = make_test('t1')
    t2 = make_test('t2')
    t3 = make_test('t3')
    t4 = make_test('t4')
    t5 = make_test('t5')
    t6 = make_test('t6')
    t7 = make_test('t7')
    t8 = make_test('t8')
    t1.depends_on('t0')
    t2.depends_on('t1')
    t3.depends_on('t1')
    t3.depends_on('t2')
    t4.depends_on('t2')
    t4.depends_on('t3')
    t6.depends_on('t5')
    t7.depends_on('t5')
    t8.depends_on('t7')

    testcases_all = executors.generate_testcases([t0, t1, t2, t3, t4,
                                                  t5, t6, t7, t8])
    testcases = executors.generate_testcases([t3, t7])
    full_deps, _ = dependencies.build_deps(testcases_all)
    pruned_deps = dependencies.prune_deps(full_deps, testcases)

    # Check the connectivity
    assert len(pruned_deps) == 6*4
    for p in ['sys0:p0', 'sys0:p1']:
        for e in ['e0', 'e1']:
            node = functools.partial(Node, pname=p, ename=e)
            assert has_edge(pruned_deps, node('t3'), node('t2'))
            assert has_edge(pruned_deps, node('t3'), node('t1'))
            assert has_edge(pruned_deps, node('t2'), node('t1'))
            assert has_edge(pruned_deps, node('t1'), node('t0'))
            assert has_edge(pruned_deps, node('t7'), node('t5'))
            assert len(pruned_deps[node('t3')]) == 2
            assert len(pruned_deps[node('t2')]) == 1
            assert len(pruned_deps[node('t1')]) == 1
            assert len(pruned_deps[node('t7')]) == 1
            assert len(pruned_deps[node('t5')]) == 0
            assert len(pruned_deps[node('t0')]) == 0
Example #3
def repeat_tests(testcases, num_repeats):
    '''Returns new test cases parameterized over their repetition number'''

    tmp_registry = TestRegistry()
    unique_checks = set()
    for tc in testcases:
        check = tc.check
        if check.is_fixture() or check in unique_checks:
            continue

        unique_checks.add(check)
        cls = type(check)
        variant_info = cls.get_variant_info(
            check.variant_num, recurse=True
        )
        nc = make_test(
            f'{cls.__name__}', (cls,),
            {
                '$repeat_no': builtins.parameter(range(num_repeats))
            }
        )
        nc._rfm_custom_prefix = check.prefix
        for i in range(nc.num_variants):
            # Check if this variant should be instantiated
            vinfo = nc.get_variant_info(i, recurse=True)
            vinfo['params'].pop('$repeat_no')
            if vinfo == variant_info:
                tmp_registry.add(nc, variant_num=i)

    new_checks = tmp_registry.instantiate_all()
    return generate_testcases(new_checks)
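
A minimal usage sketch for `repeat_tests` above (hypothetical driver code; the check path and the place from which `repeat_tests` is imported are illustrative assumptions, while `generate_testcases` and `RegressionCheckLoader` appear elsewhere in this listing):

import reframe.frontend.executors as executors
from reframe.frontend.loader import RegressionCheckLoader

# Load some checks, generate their test cases and repeat every non-fixture
# case three times via the injected '$repeat_no' parameter.
loader = RegressionCheckLoader(['unittests/resources/checks'])   # path is illustrative
cases = executors.generate_testcases(loader.load_all())
repeated_cases = repeat_tests(cases, num_repeats=3)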
Example #4
def test_skip_unresolved_deps(make_exec_ctx):
    #
    #       t0    t4
    #      ^  ^   ^
    #     /    \ /
    #    t1    t2
    #           ^
    #           |
    #          t3
    #

    make_exec_ctx(system='sys0:p0')

    t0 = make_test('t0')
    t0.valid_systems = ['sys0:p1']
    t1 = make_test('t1')
    t2 = make_test('t2')
    t3 = make_test('t3')
    t4 = make_test('t4')
    t1.depends_on('t0')
    t2.depends_on('t0')
    t2.depends_on('t4')
    t3.depends_on('t2')
    deps, skipped_cases = dependencies.build_deps(
        executors.generate_testcases([t0, t1, t2, t3, t4])
    )
    assert len(skipped_cases) == 6

    skipped_tests = {c.check.name for c in skipped_cases}
    assert skipped_tests == {'t1', 't2', 't3'}
Example #5
    def _make_cases():
        @fixtures.custom_prefix('unittests/resources/checks')
        class _T0(rfm.RegressionTest):
            valid_systems = ['*']
            valid_prog_environs = ['*']
            sourcepath = 'hello.c'
            executable = 'echo'
            sanity_patterns = sn.assert_true(1)

            def check_and_skip(self):
                self.skip_if(True)

            # Attach the hook manually based on the request.param
            when, stage = request.param.split('_', maxsplit=1)
            hook = rfm.run_before if when == 'pre' else rfm.run_after
            check_and_skip = hook(stage)(check_and_skip)

        class _T1(rfm.RunOnlyRegressionTest):
            valid_systems = ['*']
            valid_prog_environs = ['*']
            sanity_patterns = sn.assert_true(1)

            def __init__(self):
                self.depends_on(_T0.__qualname__)

        cases = executors.generate_testcases([_T0(), _T1()])
        depgraph, _ = dependencies.build_deps(cases)
        return dependencies.toposort(depgraph), request.param
Example #6
def test_valid_deps(make_test, default_exec_ctx):
    #
    #       t0       +-->t5<--+
    #       ^        |        |
    #       |        |        |
    #   +-->t1<--+   t6       t7
    #   |        |            ^
    #   t2<------t3           |
    #   ^        ^            |
    #   |        |            t8
    #   +---t4---+
    #
    t0 = make_test('t0')
    t1 = make_test('t1')
    t2 = make_test('t2')
    t3 = make_test('t3')
    t4 = make_test('t4')
    t5 = make_test('t5')
    t6 = make_test('t6')
    t7 = make_test('t7')
    t8 = make_test('t8')
    t1.depends_on('t0')
    t2.depends_on('t1')
    t3.depends_on('t1')
    t3.depends_on('t2')
    t4.depends_on('t2')
    t4.depends_on('t3')
    t6.depends_on('t5')
    t7.depends_on('t5')
    t8.depends_on('t7')
    dependencies.validate_deps(
        dependencies.build_deps(
            executors.generate_testcases([t0, t1, t2, t3, t4, t5, t6, t7,
                                          t8]))[0])
Example #7
    def test_valid_deps(self):
        #
        #       t0       +-->t5<--+
        #       ^        |        |
        #       |        |        |
        #   +-->t1<--+   t6       t7
        #   |        |            ^
        #   t2<------t3           |
        #   ^        ^            |
        #   |        |            t8
        #   +---t4---+
        #
        t0 = self.create_test('t0')
        t1 = self.create_test('t1')
        t2 = self.create_test('t2')
        t3 = self.create_test('t3')
        t4 = self.create_test('t4')
        t5 = self.create_test('t5')
        t6 = self.create_test('t6')
        t7 = self.create_test('t7')
        t8 = self.create_test('t8')
        t1.depends_on('t0')
        t2.depends_on('t1')
        t3.depends_on('t1')
        t3.depends_on('t2')
        t4.depends_on('t2')
        t4.depends_on('t3')
        t6.depends_on('t5')
        t7.depends_on('t5')
        t8.depends_on('t7')
        dependency.validate_deps(
            dependency.build_deps(
                executors.generate_testcases(
                    [t0, t1, t2, t3, t4, t5, t6, t7, t8])))
Example #8
def test_config_params(make_runner, make_exec_ctx):
    '''Test that configuration parameters are properly retrieved with the
    various execution policies.
    '''
    class T(rfm.RunOnlyRegressionTest):
        valid_systems = ['generic2']
        valid_prog_environs = ['*']
        executable = 'echo'

        @sanity_function
        def validate(self):
            return True

        @run_after('setup')
        def assert_git_timeout(self):
            expected = 10 if self.current_partition.name == 'part1' else 20
            timeout = rt.runtime().get_option('general/0/git_timeout')
            assert timeout == expected

    make_exec_ctx(system='generic2')
    runner = make_runner()
    testcases = executors.generate_testcases([T()])
    runner.runall(testcases)
    assert runner.stats.num_cases() == 2
    assert not runner.stats.failed()
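
The hook above expects the site configuration to define partition-specific values for `general/git_timeout` on the `generic2` system. A plausible configuration fragment (hypothetical; the actual unit-test configuration file may differ) scopes the option with `target_systems`:

'general': [
    {
        'git_timeout': 10,
        'target_systems': ['generic2:part1']
    },
    {
        'git_timeout': 20,
        'target_systems': ['generic2']
    }
]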
Example #9
def test_require_deps(HelloTest, local_exec_ctx):
    import reframe.frontend.dependencies as dependencies
    import reframe.frontend.executors as executors

    @test_util.custom_prefix('unittests/resources/checks')
    class T0(HelloTest):
        x = variable(int, value=1)

    @test_util.custom_prefix('unittests/resources/checks')
    class T1(HelloTest):
        @run_after('init')
        def setdeps(self):
            self.depends_on('T0')

        @require_deps
        def sety(self, T0):
            self.y = T0().x + 1

        @run_before('run')
        @require_deps
        def setz(self, T0):
            self.z = T0().x + 2

    cases = executors.generate_testcases([T0(), T1()])
    deps, _ = dependencies.build_deps(cases)
    for c in dependencies.toposort(deps):
        _run(*c)

    for c in cases:
        t = c.check
        if t.name == 'T0':
            assert t.x == 1
        elif t.name == 'T1':
            assert t.y == 2
            assert t.z == 3
Example #10
    def test_toposort(self):
        #
        #       t0       +-->t5<--+
        #       ^        |        |
        #       |        |        |
        #   +-->t1<--+   t6       t7
        #   |        |            ^
        #   t2<------t3           |
        #   ^        ^            |
        #   |        |            t8
        #   +---t4---+
        #
        t0 = self.create_test('t0')
        t1 = self.create_test('t1')
        t2 = self.create_test('t2')
        t3 = self.create_test('t3')
        t4 = self.create_test('t4')
        t5 = self.create_test('t5')
        t6 = self.create_test('t6')
        t7 = self.create_test('t7')
        t8 = self.create_test('t8')
        t1.depends_on('t0')
        t2.depends_on('t1')
        t3.depends_on('t1')
        t3.depends_on('t2')
        t4.depends_on('t2')
        t4.depends_on('t3')
        t6.depends_on('t5')
        t7.depends_on('t5')
        t8.depends_on('t7')
        deps = dependency.build_deps(
            executors.generate_testcases([t0, t1, t2, t3, t4, t5, t6, t7, t8]))
        cases = dependency.toposort(deps)
        self.assert_topological_order(cases, deps)
Example #11
def test_distribute_testcases(loader, default_exec_ctx):
    testcases = executors.generate_testcases(loader.load_all())
    testcases = filter(filters.have_any_name('Simple'), testcases)

    testcases = list(testcases)
    assert len(testcases) == 4
    count = sum(map(lambda x: x.partition.fullname == 'sys0:p0', testcases))
    assert count == 2
    count = sum(map(lambda x: x.partition.fullname == 'sys0:p1', testcases))
    assert count == 2

    node_map = {'sys0:p0': ['n1', 'n2'], 'sys0:p1': ['n3']}
    new_cases = distribute_tests(testcases, node_map)
    assert len(new_cases) == 6
    count = sum(map(lambda x: x.partition.fullname == 'sys0:p0', new_cases))
    assert count == 4
    count = sum(map(lambda x: x.partition.fullname == 'sys0:p1', new_cases))
    assert count == 2

    def sys0p0_nodes():
        for nodelist in (['n2'], ['n2'], ['n1'], ['n1']):
            yield nodelist

    nodelist_iter = sys0p0_nodes()
    for tc in new_cases:
        nodes = getattr(tc.check, '$nid')
        if tc.partition.fullname == 'sys0:p0':
            assert nodes == next(nodelist_iter)
        else:
            assert nodes == ['n3']

    # Make sure we have consumed all the elements from nodelist_iter
    with pytest.raises(StopIteration):
        next(nodelist_iter)
Example #12
def test_skip_unresolved_deps(make_test, temp_runtime):
    #
    #       t0    t4
    #      ^  ^   ^
    #     /    \ /
    #    t1    t2
    #           ^
    #           |
    #          t3
    #

    rt = temp_runtime(fixtures.TEST_CONFIG_FILE, system='sys0:p0')
    next(rt)

    t0 = make_test('t0')
    t0.valid_systems = ['sys0:p1']
    t1 = make_test('t1')
    t2 = make_test('t2')
    t3 = make_test('t3')
    t4 = make_test('t4')
    t1.depends_on('t0')
    t2.depends_on('t0')
    t2.depends_on('t4')
    t3.depends_on('t2')
    deps, skipped_cases = dependencies.build_deps(
        executors.generate_testcases([t0, t1, t2, t3, t4])
    )
    assert len(skipped_cases) == 6

    skipped_tests = {c.check.name for c in skipped_cases}
    assert skipped_tests == {'t1', 't2', 't3'}
Example #13
    def runall(self, checks, sort=False, *args, **kwargs):
        cases = executors.generate_testcases(checks, *args, **kwargs)
        if sort:
            depgraph = dependency.build_deps(cases)
            dependency.validate_deps(depgraph)
            cases = dependency.toposort(depgraph)

        self.runner.runall(cases)
Example #14
def test_build_deps_unknown_target_env(loader, exec_ctx):
    checks = loader.load_all()

    # Add some nonexistent dependencies
    test0 = find_check('Test0', checks)
    test1 = find_check('Test1_default', checks)
    test1.depends_on('Test0', rfm.DEPEND_EXACT, {'e0': ['eX']})
    with pytest.raises(DependencyError):
        dependency.build_deps(executors.generate_testcases(checks))
Example #15
def test_eq_hash(loader, default_exec_ctx):
    cases = executors.generate_testcases(loader.load_all())
    case0 = find_case('Test0', 'e0', 'p0', cases)
    case1 = find_case('Test0', 'e1', 'p0', cases)
    case0_copy = case0.clone()

    assert case0 == case0_copy
    assert hash(case0) == hash(case0_copy)
    assert case1 != case0
    assert hash(case1) != hash(case0)
Example #16
    def _make_cases(checks=None, sort=False, *args, **kwargs):
        if checks is None:
            checks = make_loader(['unittests/resources/checks']).load_all()

        cases = executors.generate_testcases(checks, *args, **kwargs)
        if sort:
            depgraph, _ = dependencies.build_deps(cases)
            dependencies.validate_deps(depgraph)
            cases = dependencies.toposort(depgraph)

        return cases
Example #17
    def test_cyclic_deps_by_env(self):
        t0 = self.create_test('t0')
        t1 = self.create_test('t1')
        t1.depends_on('t0', rfm.DEPEND_EXACT, {'e0': ['e0']})
        t0.depends_on('t1', rfm.DEPEND_EXACT, {'e1': ['e1']})
        deps = dependency.build_deps(executors.generate_testcases([t0, t1]))
        with pytest.raises(DependencyError) as exc_info:
            dependency.validate_deps(deps)

        assert ('t1->t0->t1' in str(exc_info.value)
                or 't0->t1->t0' in str(exc_info.value))
Example #18
def test_cyclic_deps_by_env(make_test, default_exec_ctx):
    t0 = make_test('t0')
    t1 = make_test('t1')
    t1.depends_on('t0', udeps.env_is('e0'))
    t0.depends_on('t1', udeps.env_is('e1'))
    deps, _ = dependencies.build_deps(executors.generate_testcases([t0, t1]))
    with pytest.raises(DependencyError) as exc_info:
        dependencies.validate_deps(deps)

    assert ('t1->t0->t1' in str(exc_info.value)
            or 't0->t1->t0' in str(exc_info.value))
Example #19
def test_build_deps_unknown_source_env(loader, exec_ctx):
    checks = loader.load_all()

    # Add some nonexistent dependencies
    test0 = find_check('Test0', checks)
    test1 = find_check('Test1_default', checks)
    test1.depends_on('Test0', rfm.DEPEND_EXACT, {'eX': ['e0']})

    # Unknown source is ignored, because it might simply be that the test
    # is not executed for eX
    deps = dependency.build_deps(executors.generate_testcases(checks))
    assert num_deps(deps, 'Test1_default') == 4
Example #20
    def test_eq_hash(self):
        find_case = TestDependencies.find_case
        cases = executors.generate_testcases(self.loader.load_all())

        case0 = find_case('Test0', 'e0', cases)
        case1 = find_case('Test0', 'e1', cases)
        case0_copy = case0.clone()

        assert case0 == case0_copy
        assert hash(case0) == hash(case0_copy)
        assert case1 != case0
        assert hash(case1) != hash(case0)
Example #21
def test_toposort_subgraph(default_exec_ctx):
    #
    #       t0
    #       ^
    #       |
    #   +-->t1<--+
    #   |        |
    #   t2<------t3
    #   ^        ^
    #   |        |
    #   +---t4---+
    #
    t0 = make_test('t0')
    t1 = make_test('t1')
    t2 = make_test('t2')
    t3 = make_test('t3')
    t4 = make_test('t4')
    t1.depends_on('t0')
    t2.depends_on('t1')
    t3.depends_on('t1')
    t3.depends_on('t2')
    t4.depends_on('t2')
    t4.depends_on('t3')
    full_deps, _ = dependencies.build_deps(
        executors.generate_testcases([t0, t1, t2, t3, t4])
    )
    partial_deps, _ = dependencies.build_deps(
        executors.generate_testcases([t3, t4]), full_deps
    )
    cases = dependencies.toposort(partial_deps, is_subgraph=True)
    assert_topological_order(cases, partial_deps)

    # Assert the level assignment
    cases_by_level = {}
    for c in cases:
        cases_by_level.setdefault(c.level, set())
        cases_by_level[c.level].add(c.check.name)

    assert cases_by_level[1] == {'t3'}
    assert cases_by_level[2] == {'t4'}
Example #22
def distribute_tests(testcases, node_map):
    '''Returns new test cases that are parameterized to run on the nodes of
    their partitions, based on the node map.
    '''
    tmp_registry = TestRegistry()

    # We don't want to register the same check for every environment
    # per partition
    check_part_combs = set()
    for tc in testcases:
        check, partition, _ = tc
        candidate_comb = (check.unique_name, partition.fullname)
        if check.is_fixture() or candidate_comb in check_part_combs:
            continue

        check_part_combs.add(candidate_comb)
        cls = type(check)
        variant_info = cls.get_variant_info(
            check.variant_num, recurse=True
        )
        nc = make_test(
            f'{cls.__name__}_{partition.fullname.replace(":", "_")}',
            (cls,),
            {
                'valid_systems': [partition.fullname],
                '$nid': builtins.parameter(
                    [[n] for n in node_map[partition.fullname]],
                    fmt=util.nodelist_abbrev
                )
            },
            methods=[
                builtins.run_before('run')(_rfm_pin_run_nodes),
                builtins.run_before('compile')(_rfm_pin_build_nodes),
                # We re-set the valid system in a hook to make sure that it
                # will not be overwritten by a parent post-init hook
                builtins.run_after('init')(
                    make_valid_systems_hook([partition.fullname])
                ),
            ]
        )

        # We have to set the prefix manually
        nc._rfm_custom_prefix = check.prefix
        for i in range(nc.num_variants):
            # Check if this variant should be instantiated
            vinfo = nc.get_variant_info(i, recurse=True)
            vinfo['params'].pop('$nid')
            if vinfo == variant_info:
                tmp_registry.add(nc, variant_num=i)

    new_checks = tmp_registry.instantiate_all()
    return generate_testcases(new_checks)
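
A short usage sketch for `distribute_tests`, mirroring the test in Example #11 above (the node map is illustrative):

# Pin each generated case to a single node of its partition by
# re-parameterizing it over the injected '$nid' parameter.
node_map = {'sys0:p0': ['n1', 'n2'], 'sys0:p1': ['n3']}
new_cases = distribute_tests(testcases, node_map)  # one new case per (original case, node)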
Example #23
def test_toposort(default_exec_ctx):
    #
    #       t0       +-->t5<--+
    #       ^        |        |
    #       |        |        |
    #   +-->t1<--+   t6       t7
    #   |        |            ^
    #   t2<------t3           |
    #   ^        ^            |
    #   |        |            t8
    #   +---t4---+
    #
    t0 = make_test('t0')
    t1 = make_test('t1')
    t2 = make_test('t2')
    t3 = make_test('t3')
    t4 = make_test('t4')
    t5 = make_test('t5')
    t6 = make_test('t6')
    t7 = make_test('t7')
    t8 = make_test('t8')
    t1.depends_on('t0')
    t2.depends_on('t1')
    t3.depends_on('t1')
    t3.depends_on('t2')
    t4.depends_on('t2')
    t4.depends_on('t3')
    t6.depends_on('t5')
    t7.depends_on('t5')
    t8.depends_on('t7')
    deps, _ = dependencies.build_deps(
        executors.generate_testcases([t0, t1, t2, t3, t4,
                                      t5, t6, t7, t8])
    )
    cases = dependencies.toposort(deps)
    assert_topological_order(cases, deps)

    # Assert the level assignment
    cases_by_level = {}
    for c in cases:
        cases_by_level.setdefault(c.level, set())
        cases_by_level[c.level].add(c.check.name)

    assert cases_by_level[0] == {'t0', 't5'}
    assert cases_by_level[1] == {'t1', 't6', 't7'}
    assert cases_by_level[2] == {'t2', 't8'}
    assert cases_by_level[3] == {'t3'}
    assert cases_by_level[4] == {'t4'}
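
The `assert_topological_order` helper used by these tests is not shown in this listing; a minimal sketch of the property it verifies (the helper in the actual ReFrame test suite is more elaborate) could look like this:

def assert_topological_order(cases, graph):
    # A valid topological order lists every case only after all of the cases
    # it depends on, i.e. after its whole adjacency list in `graph`.
    visited = set()
    for c in cases:
        assert all(dep in visited for dep in graph[c]), \
            f'{c} scheduled before one of its dependencies'
        visited.add(c)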
Example #24
def test_ci_gitlab_pipeline():
    loader = RegressionCheckLoader(
        ['unittests/resources/checks_unlisted/deps_complex.py'])
    cases = dependencies.toposort(
        dependencies.build_deps(executors.generate_testcases(
            loader.load_all()))[0])
    with io.StringIO() as fp:
        ci.emit_pipeline(fp, cases)
        pipeline = fp.getvalue()

    # Fetch the latest Gitlab CI JSON schema
    response = requests.get('https://json.schemastore.org/gitlab-ci')
    assert response.ok

    schema = response.json()
    jsonschema.validate(yaml.safe_load(pipeline), schema)
Example #25
def test_build_deps_unknown_test(loader, exec_ctx):
    checks = loader.load_all()

    # Add some nonexistent dependencies
    test0 = find_check('Test0', checks)
    for depkind in ('default', 'fully', 'by_env', 'exact'):
        test1 = find_check('Test1_' + depkind, checks)
        if depkind == 'default':
            test1.depends_on('TestX')
        elif depkind == 'exact':
            test1.depends_on('TestX', rfm.DEPEND_EXACT, {'e0': ['e0']})
        elif depkind == 'fully':
            test1.depends_on('TestX', rfm.DEPEND_FULLY)
        elif depkind == 'by_env':
            test1.depends_on('TestX', rfm.DEPEND_BY_ENV)

        with pytest.raises(DependencyError):
            dependency.build_deps(executors.generate_testcases(checks))
Example #26
def test_cyclic_deps(default_exec_ctx):
    #
    #       t0       +-->t5<--+
    #       ^        |        |
    #       |        |        |
    #   +-->t1<--+   t6       t7
    #   |   |    |            ^
    #   t2  |    t3           |
    #   ^   |    ^            |
    #   |   v    |            t8
    #   +---t4---+
    #
    t0 = make_test('t0')
    t1 = make_test('t1')
    t2 = make_test('t2')
    t3 = make_test('t3')
    t4 = make_test('t4')
    t5 = make_test('t5')
    t6 = make_test('t6')
    t7 = make_test('t7')
    t8 = make_test('t8')
    t1.depends_on('t0')
    t1.depends_on('t4')
    t2.depends_on('t1')
    t3.depends_on('t1')
    t4.depends_on('t2')
    t4.depends_on('t3')
    t6.depends_on('t5')
    t7.depends_on('t5')
    t8.depends_on('t7')
    deps, _ = dependencies.build_deps(
        executors.generate_testcases([t0, t1, t2, t3, t4,
                                      t5, t6, t7, t8])
    )

    with pytest.raises(DependencyError) as exc_info:
        dependencies.validate_deps(deps)

    assert ('t4->t2->t1->t4' in str(exc_info.value) or
            't2->t1->t4->t2' in str(exc_info.value) or
            't1->t4->t2->t1' in str(exc_info.value) or
            't1->t4->t3->t1' in str(exc_info.value) or
            't4->t3->t1->t4' in str(exc_info.value) or
            't3->t1->t4->t3' in str(exc_info.value))
Example #27
    def test_require_deps(self):
        import unittests.resources.checks.hellocheck as mod
        import reframe.frontend.dependency as dependency
        import reframe.frontend.executors as executors

        class T0(mod.HelloTest):
            def __init__(self):
                super().__init__()
                self._prefix = 'unittests/resources/checks'
                self.name = type(self).__name__
                self.executable = os.path.join('.', self.name)
                self.x = 1

        class T1(mod.HelloTest):
            def __init__(self):
                super().__init__()
                self._prefix = 'unittests/resources/checks'
                self.name = type(self).__name__
                self.executable = os.path.join('.', self.name)
                self.depends_on('T0')

            @rfm.require_deps
            def sety(self, T0):
                self.y = T0().x + 1

            @rfm.run_before('run')
            @rfm.require_deps
            def setz(self, T0):
                self.z = T0().x + 2

        cases = executors.generate_testcases([T0(), T1()])
        deps = dependency.build_deps(cases)
        for c in dependency.toposort(deps):
            _run(*c)

        for c in cases:
            t = c.check
            if t.name == 'T0':
                assert t.x == 1
            elif t.name == 'T1':
                assert t.y == 2
                assert t.z == 3
Example #28
def test_require_deps(HelloTest, local_exec_ctx):
    import reframe.frontend.dependencies as dependencies
    import reframe.frontend.executors as executors

    @test_util.custom_prefix('unittests/resources/checks')
    class T0(HelloTest):
        def __init__(self):
            super().__init__()
            self.name = type(self).__name__
            self.executable = os.path.join('.', self.name)
            self.x = 1

    @test_util.custom_prefix('unittests/resources/checks')
    class T1(HelloTest):
        def __init__(self):
            super().__init__()
            self.name = type(self).__name__
            self.executable = os.path.join('.', self.name)
            self.depends_on('T0')

        @require_deps
        def sety(self, T0):
            self.y = T0().x + 1

        @run_before('run')
        @require_deps
        def setz(self, T0):
            self.z = T0().x + 2

    cases = executors.generate_testcases([T0(), T1()])
    deps, _ = dependencies.build_deps(cases)
    for c in dependencies.toposort(deps):
        _run(*c)

    for c in cases:
        t = c.check
        if t.name == 'T0':
            assert t.x == 1
        elif t.name == 'T1':
            assert t.y == 2
            assert t.z == 3
Example #29
def test_ci_gitlab_pipeline():
    loader = RegressionCheckLoader(
        ['unittests/resources/checks_unlisted/deps_complex.py'])
    cases = dependencies.toposort(
        dependencies.build_deps(executors.generate_testcases(
            loader.load_all()))[0])
    with io.StringIO() as fp:
        ci.emit_pipeline(fp, cases)
        pipeline = fp.getvalue()

    # Fetch the latest Gitlab CI JSON schema
    try:
        response = requests.get(
            'https://gitlab.com/gitlab-org/gitlab/-/raw/master/app/assets/javascripts/editor/schema/ci.json'  # noqa: E501
        )
    except requests.exceptions.ConnectionError as e:
        pytest.skip(f'could not reach URL: {e}')
    else:
        assert response.ok

    schema = response.json()
    jsonschema.validate(yaml.safe_load(pipeline), schema)
Example #30
File: cli.py  Project: ikirker/reframe
def main():
    # Setup command line options
    argparser = argparse.ArgumentParser()
    output_options = argparser.add_argument_group('Options controlling output')
    locate_options = argparser.add_argument_group(
        'Options for locating checks')
    select_options = argparser.add_argument_group(
        'Options for selecting checks')
    action_options = argparser.add_argument_group(
        'Options controlling actions')
    run_options = argparser.add_argument_group(
        'Options controlling execution of checks')
    env_options = argparser.add_argument_group(
        'Options controlling environment')
    misc_options = argparser.add_argument_group('Miscellaneous options')

    # Output directory options
    output_options.add_argument('--prefix',
                                action='store',
                                metavar='DIR',
                                help='Set output directory prefix to DIR')
    output_options.add_argument('-o',
                                '--output',
                                action='store',
                                metavar='DIR',
                                help='Set output directory to DIR')
    output_options.add_argument('-s',
                                '--stage',
                                action='store',
                                metavar='DIR',
                                help='Set stage directory to DIR')
    output_options.add_argument(
        '--perflogdir',
        action='store',
        metavar='DIR',
        help='Set directory prefix for the performance logs '
        '(default: ${prefix}/perflogs, '
        'relevant only if the filelog backend is used)')
    output_options.add_argument(
        '--keep-stage-files',
        action='store_true',
        help='Keep stage directory even if check is successful')
    output_options.add_argument(
        '--save-log-files',
        action='store_true',
        default=False,
        help='Copy the log file from the work dir to the output dir at the '
        'end of the program')

    # Check discovery options
    locate_options.add_argument('-c',
                                '--checkpath',
                                action='append',
                                metavar='DIR|FILE',
                                help='Search for checks in DIR or FILE')
    locate_options.add_argument('-R',
                                '--recursive',
                                action='store_true',
                                help='Load checks recursively')
    locate_options.add_argument('--ignore-check-conflicts',
                                action='store_true',
                                help='Skip checks with conflicting names')

    # Select options
    select_options.add_argument('-t',
                                '--tag',
                                action='append',
                                dest='tags',
                                default=[],
                                help='Select checks matching TAG')
    select_options.add_argument('-n',
                                '--name',
                                action='append',
                                dest='names',
                                default=[],
                                metavar='NAME',
                                help='Select checks with NAME')
    select_options.add_argument('-x',
                                '--exclude',
                                action='append',
                                dest='exclude_names',
                                metavar='NAME',
                                default=[],
                                help='Exclude checks with NAME')
    select_options.add_argument(
        '-p',
        '--prgenv',
        action='append',
        default=[r'.*'],
        help='Select tests for PRGENV programming environment only')
    select_options.add_argument('--gpu-only',
                                action='store_true',
                                help='Select only GPU tests')
    select_options.add_argument('--cpu-only',
                                action='store_true',
                                help='Select only CPU tests')

    # Action options
    action_options.add_argument('-l',
                                '--list',
                                action='store_true',
                                help='List matched regression checks')
    action_options.add_argument(
        '-L',
        '--list-detailed',
        action='store_true',
        help='List matched regression checks with a detailed description')
    action_options.add_argument('-r',
                                '--run',
                                action='store_true',
                                help='Run regression with the selected checks')

    # Run options
    run_options.add_argument('-A',
                             '--account',
                             action='store',
                             help='Use ACCOUNT for submitting jobs')
    run_options.add_argument('-P',
                             '--partition',
                             action='store',
                             metavar='PART',
                             help='Use PART for submitting jobs')
    run_options.add_argument('--reservation',
                             action='store',
                             metavar='RES',
                             help='Use RES for submitting jobs')
    run_options.add_argument('--nodelist',
                             action='store',
                             help='Run checks on the selected list of nodes')
    run_options.add_argument(
        '--exclude-nodes',
        action='store',
        metavar='NODELIST',
        help='Exclude the list of nodes from running checks')
    run_options.add_argument('--job-option',
                             action='append',
                             metavar='OPT',
                             dest='job_options',
                             default=[],
                             help='Pass OPT to job scheduler')
    run_options.add_argument('--force-local',
                             action='store_true',
                             help='Force local execution of checks')
    run_options.add_argument('--skip-sanity-check',
                             action='store_true',
                             help='Skip sanity checking')
    run_options.add_argument('--skip-performance-check',
                             action='store_true',
                             help='Skip performance checking')
    run_options.add_argument('--strict',
                             action='store_true',
                             help='Force strict performance checking')
    run_options.add_argument('--skip-system-check',
                             action='store_true',
                             help='Skip system check')
    run_options.add_argument('--skip-prgenv-check',
                             action='store_true',
                             help='Skip prog. environment check')
    run_options.add_argument(
        '--exec-policy',
        metavar='POLICY',
        action='store',
        choices=['serial', 'async'],
        default='serial',
        help='Specify the execution policy for running the regression tests. '
        'Available policies: "serial" (default), "async"')
    run_options.add_argument('--mode',
                             action='store',
                             help='Execution mode to use')
    run_options.add_argument(
        '--max-retries',
        metavar='NUM',
        action='store',
        default=0,
        help='Specify the maximum number of times a failed regression test '
        'may be retried (default: 0)')
    run_options.add_argument(
        '--flex-alloc-tasks',
        action='store',
        dest='flex_alloc_tasks',
        metavar='{all|idle|NUM}',
        default='idle',
        help="Strategy for flexible task allocation (default: 'idle').")

    env_options.add_argument('-M',
                             '--map-module',
                             action='append',
                             metavar='MAPPING',
                             dest='module_mappings',
                             default=[],
                             help='Apply a single module mapping')
    env_options.add_argument(
        '-m',
        '--module',
        action='append',
        default=[],
        metavar='MOD',
        dest='user_modules',
        help='Load module MOD before running the regression suite')
    env_options.add_argument('--module-mappings',
                             action='store',
                             metavar='FILE',
                             dest='module_map_file',
                             help='Apply module mappings defined in FILE')
    env_options.add_argument(
        '-u',
        '--unload-module',
        action='append',
        metavar='MOD',
        dest='unload_modules',
        default=[],
        help='Unload module MOD before running the regression suite')
    env_options.add_argument(
        '--purge-env',
        action='store_true',
        dest='purge_env',
        default=False,
        help='Purge environment before running the regression suite')

    # Miscellaneous options
    misc_options.add_argument(
        '-C',
        '--config-file',
        action='store',
        dest='config_file',
        metavar='FILE',
        default=os.path.join(reframe.INSTALL_PREFIX, 'reframe/settings.py'),
        help='Specify a custom config-file for the machine. '
        '(default: %s)' %
        os.path.join(reframe.INSTALL_PREFIX, 'reframe/settings.py'))
    misc_options.add_argument('--nocolor',
                              action='store_false',
                              dest='colorize',
                              default=True,
                              help='Disable coloring of output')
    misc_options.add_argument('--performance-report',
                              action='store_true',
                              help='Print the performance report')

    # FIXME: This should move to env_options as soon as
    # https://github.com/eth-cscs/reframe/pull/946 is merged
    misc_options.add_argument('--non-default-craype',
                              action='store_true',
                              default=False,
                              help='Test a non-default Cray PE')
    misc_options.add_argument(
        '--show-config',
        action='store_true',
        help='Print configuration of the current system and exit')
    misc_options.add_argument(
        '--show-config-env',
        action='store',
        metavar='ENV',
        help='Print configuration of environment ENV and exit')
    misc_options.add_argument('--system',
                              action='store',
                              help='Load SYSTEM configuration explicitly')
    misc_options.add_argument(
        '--timestamp',
        action='store',
        nargs='?',
        const='%FT%T',
        metavar='TIMEFMT',
        help='Append a timestamp component to the regression directories '
        '(default format "%%FT%%T")')
    misc_options.add_argument('-V',
                              '--version',
                              action='version',
                              version=reframe.VERSION)
    misc_options.add_argument('-v',
                              '--verbose',
                              action='count',
                              default=0,
                              help='Increase verbosity level of output')

    if len(sys.argv) == 1:
        argparser.print_help()
        sys.exit(1)

    # Parse command line
    options = argparser.parse_args()

    # Load configuration
    try:
        settings = config.load_settings_from_file(options.config_file)
    except (OSError, ReframeError) as e:
        sys.stderr.write('%s: could not load settings: %s\n' %
                         (sys.argv[0], e))
        sys.exit(1)

    # Configure logging
    try:
        logging.configure_logging(settings.logging_config)
    except (OSError, ConfigError) as e:
        sys.stderr.write('could not configure logging: %s\n' % e)
        sys.exit(1)

    # Set colors in logger
    logging.getlogger().colorize = options.colorize

    # Setup printer
    printer = PrettyPrinter()
    printer.colorize = options.colorize
    if options.verbose:
        printer.inc_verbosity(options.verbose)

    try:
        runtime.init_runtime(settings.site_configuration,
                             options.system,
                             non_default_craype=options.non_default_craype)
    except SystemAutodetectionError:
        printer.warning(
            'could not find a configuration entry for the current system; '
            'falling back to a generic system configuration; '
            'please check the online documentation on how to configure '
            'ReFrame for your system.')
        settings.site_configuration['systems'] = {
            'generic': {
                'descr': 'Generic fallback system configuration',
                'hostnames': ['localhost'],
                'partitions': {
                    'login': {
                        'scheduler': 'local',
                        'environs': ['builtin-gcc'],
                        'descr': 'Login nodes'
                    }
                }
            }
        }
        settings.site_configuration['environments'] = {
            '*': {
                'builtin-gcc': {
                    'type': 'ProgEnvironment',
                    'cc': 'gcc',
                    'cxx': 'g++',
                    'ftn': 'gfortran',
                }
            }
        }
        runtime.init_runtime(settings.site_configuration,
                             'generic',
                             non_default_craype=options.non_default_craype)
    except Exception as e:
        printer.error('configuration error: %s' % e)
        printer.verbose(''.join(traceback.format_exception(*sys.exc_info())))
        sys.exit(1)

    rt = runtime.runtime()
    try:
        if options.module_map_file:
            rt.modules_system.load_mapping_from_file(options.module_map_file)

        if options.module_mappings:
            for m in options.module_mappings:
                rt.modules_system.load_mapping(m)

    except (ConfigError, OSError) as e:
        printer.error('could not load module mappings: %s' % e)
        sys.exit(1)

    if options.mode:
        try:
            mode_args = rt.mode(options.mode)

            # Parse the mode's options and reparse the command-line
            options = argparser.parse_args(mode_args)
            options = argparser.parse_args(namespace=options)
        except ConfigError as e:
            printer.error('could not obtain execution mode: %s' % e)
            sys.exit(1)

    # Adjust system directories
    if options.prefix:
        # if prefix is set, reset all other directories
        rt.resources.prefix = os_ext.expandvars(options.prefix)
        rt.resources.outputdir = None
        rt.resources.stagedir = None

    if options.output:
        rt.resources.outputdir = os_ext.expandvars(options.output)

    if options.stage:
        rt.resources.stagedir = os_ext.expandvars(options.stage)

    if (os_ext.samefile(rt.resources.stage_prefix, rt.resources.output_prefix)
            and not options.keep_stage_files):
        printer.error('stage and output refer to the same directory; '
                      'if this is on purpose, please use also the '
                      "`--keep-stage-files' option.")
        sys.exit(1)

    if options.timestamp:
        rt.resources.timefmt = options.timestamp

    # Configure performance logging
    # NOTE: we need resources to be configured in order to set the global
    # perf. logging prefix correctly
    if options.perflogdir:
        rt.resources.perflogdir = os_ext.expandvars(options.perflogdir)

    logging.LOG_CONFIG_OPTS['handlers.filelog.prefix'] = (
        rt.resources.perflog_prefix)

    # Show configuration after everything is set up
    if options.show_config:
        printer.info(rt.show_config())
        sys.exit(0)

    if options.show_config_env:
        envname = options.show_config_env
        for p in rt.system.partitions:
            environ = p.environment(envname)
            if environ:
                break

        if environ is None:
            printer.error('no such environment: ' + envname)
            sys.exit(1)

        printer.info(environ.details())
        sys.exit(0)

    if hasattr(settings, 'perf_logging_config'):
        try:
            logging.configure_perflogging(settings.perf_logging_config)
        except (OSError, ConfigError) as e:
            printer.error('could not configure performance logging: %s\n' % e)
            sys.exit(1)
    else:
        printer.warning('no performance logging is configured; '
                        'please check documentation')

    # Setup the check loader
    if options.checkpath:
        load_path = []
        for d in options.checkpath:
            d = os_ext.expandvars(d)
            if not os.path.exists(d):
                printer.warning("%s: path `%s' does not exist. Skipping..." %
                                (argparser.prog, d))
                continue

            load_path.append(d)

        loader = RegressionCheckLoader(
            load_path,
            recurse=options.recursive,
            ignore_conflicts=options.ignore_check_conflicts)
    else:
        loader = RegressionCheckLoader(load_path=settings.checks_path,
                                       prefix=reframe.INSTALL_PREFIX,
                                       recurse=settings.checks_path_recurse)

    printer.debug(argparse.format_options(options))

    # Print command line
    printer.info('Command line: %s' % ' '.join(sys.argv))
    printer.info('Reframe version: ' + reframe.VERSION)
    printer.info('Launched by user: ' + os.environ.get('USER', '<unknown>'))
    printer.info('Launched on host: ' + socket.gethostname())

    # Print important paths
    printer.info('Reframe paths')
    printer.info('=============')
    printer.info('    Check prefix      : %s' % loader.prefix)
    printer.info(
        '%03s Check search path : %s' %
        ('(R)' if loader.recurse else '', "'%s'" % ':'.join(loader.load_path)))
    printer.info('    Stage dir prefix     : %s' % rt.resources.stage_prefix)
    printer.info('    Output dir prefix    : %s' % rt.resources.output_prefix)
    printer.info(
        '    Perf. logging prefix : %s' %
        os.path.abspath(logging.LOG_CONFIG_OPTS['handlers.filelog.prefix']))
    try:
        # Locate and load checks
        try:
            checks_found = loader.load_all()
        except OSError as e:
            raise ReframeError from e

        # Filter checks by name
        checks_matched = checks_found
        if options.exclude_names:
            for name in options.exclude_names:
                checks_matched = filter(filters.have_not_name(name),
                                        checks_matched)

        if options.names:
            checks_matched = filter(filters.have_name('|'.join(options.names)),
                                    checks_matched)

        # Filter checks by tags
        for tag in options.tags:
            checks_matched = filter(filters.have_tag(tag), checks_matched)

        # Filter checks by prgenv
        if not options.skip_prgenv_check:
            for prgenv in options.prgenv:
                checks_matched = filter(filters.have_prgenv(prgenv),
                                        checks_matched)

        # Filter checks by system
        if not options.skip_system_check:
            checks_matched = filter(
                filters.have_partition(rt.system.partitions), checks_matched)

        # Filter checks further
        if options.gpu_only and options.cpu_only:
            printer.error("options `--gpu-only' and `--cpu-only' "
                          "are mutually exclusive")
            sys.exit(1)

        if options.gpu_only:
            checks_matched = filter(filters.have_gpu_only(), checks_matched)
        elif options.cpu_only:
            checks_matched = filter(filters.have_cpu_only(), checks_matched)

        # Determine the allowed programming environments
        allowed_environs = {
            e.name
            for env_patt in options.prgenv for p in rt.system.partitions
            for e in p.environs if re.match(env_patt, e.name)
        }

        # Generate the test cases, validate dependencies and sort them
        checks_matched = list(checks_matched)
        testcases = generate_testcases(checks_matched,
                                       options.skip_system_check,
                                       options.skip_prgenv_check,
                                       allowed_environs)
        testgraph = dependency.build_deps(testcases)
        dependency.validate_deps(testgraph)
        testcases = dependency.toposort(testgraph)

        # Unload regression's module and load user-specified modules
        if hasattr(settings, 'reframe_module'):
            printer.warning(
                "the 'reframe_module' configuration option will be ignored; "
                "please use the '-u' or '--unload-module' options")

        if options.purge_env:
            rt.modules_system.unload_all()
        else:
            for m in options.unload_modules:
                rt.modules_system.unload_module(m)

        # Load the environment for the current system
        try:
            env.load(rt.system.preload_environ)
        except EnvironError as e:
            printer.error("failed to load current system's environment; "
                          "please check your configuration")
            printer.debug(str(e))
            raise

        for m in options.user_modules:
            try:
                rt.modules_system.load_module(m, force=True)
            except EnvironError as e:
                printer.warning("could not load module '%s' correctly: "
                                "Skipping..." % m)
                printer.debug(str(e))

        # Act on checks

        success = True
        if options.list:
            # List matched checks
            list_checks(list(checks_matched), printer)
        elif options.list_detailed:
            # List matched checks with details
            list_checks(list(checks_matched), printer, detailed=True)

        elif options.run:
            # Setup the execution policy
            if options.exec_policy == 'serial':
                exec_policy = SerialExecutionPolicy()
            elif options.exec_policy == 'async':
                exec_policy = AsynchronousExecutionPolicy()
            else:
                # This should not happen, since choices are handled by
                # argparser
                printer.error("unknown execution policy `%s': Exiting...")
                sys.exit(1)

            exec_policy.skip_system_check = options.skip_system_check
            exec_policy.force_local = options.force_local
            exec_policy.strict_check = options.strict
            exec_policy.skip_sanity_check = options.skip_sanity_check
            exec_policy.skip_performance_check = options.skip_performance_check
            exec_policy.keep_stage_files = options.keep_stage_files
            try:
                errmsg = "invalid option for --flex-alloc-tasks: '{0}'"
                sched_flex_alloc_tasks = int(options.flex_alloc_tasks)
                if sched_flex_alloc_tasks <= 0:
                    raise ConfigError(errmsg.format(options.flex_alloc_tasks))
            except ValueError:
                if not options.flex_alloc_tasks.lower() in {'idle', 'all'}:
                    raise ConfigError(errmsg.format(
                        options.flex_alloc_tasks)) from None

                sched_flex_alloc_tasks = options.flex_alloc_tasks

            exec_policy.sched_flex_alloc_tasks = sched_flex_alloc_tasks
            exec_policy.flex_alloc_tasks = options.flex_alloc_tasks
            exec_policy.sched_account = options.account
            exec_policy.sched_partition = options.partition
            exec_policy.sched_reservation = options.reservation
            exec_policy.sched_nodelist = options.nodelist
            exec_policy.sched_exclude_nodelist = options.exclude_nodes
            exec_policy.sched_options = options.job_options
            try:
                max_retries = int(options.max_retries)
            except ValueError:
                raise ConfigError('--max-retries is not a valid integer: %s' %
                                  options.max_retries) from None
            runner = Runner(exec_policy, printer, max_retries)
            try:
                runner.runall(testcases)
            finally:
                # Print a retry report if we did any retries
                if runner.stats.failures(run=0):
                    printer.info(runner.stats.retry_report())

                # Print a failure report if we had failures in the last run
                if runner.stats.failures():
                    printer.info(runner.stats.failure_report())
                    success = False

                if options.performance_report:
                    printer.info(runner.stats.performance_report())

        else:
            printer.info('No action specified. Exiting...')
            printer.info("Try `%s -h' for a list of available actions." %
                         argparser.prog)
            sys.exit(1)

        if not success:
            sys.exit(1)

        sys.exit(0)

    except KeyboardInterrupt:
        sys.exit(1)
    except ReframeError as e:
        printer.error(str(e))
        sys.exit(1)
    except (Exception, ReframeFatalError):
        printer.error(format_exception(*sys.exc_info()))
        sys.exit(1)
    finally:
        try:
            if options.save_log_files:
                logging.save_log_files(rt.resources.output_prefix)

        except OSError as e:
            printer.error('could not save log file: %s' % e)
            sys.exit(1)