Example #1
def test_restore_session(report_file, make_runner, dep_cases, common_exec_ctx,
                         tmp_path):
    # Select a single test to run and create the pruned graph
    selected = [tc for tc in dep_cases if tc.check.name == 'T1']
    testgraph = dependencies.prune_deps(dependencies.build_deps(dep_cases)[0],
                                        selected,
                                        max_depth=1)

    # Restore the required test cases
    report = runreport.load_report(report_file)
    testgraph, restored_cases = report.restore_dangling(testgraph)

    assert {tc.check.name for tc in restored_cases} == {'T4', 'T5'}

    # Run the selected test cases
    runner = make_runner()
    with timer() as tm:
        runner.runall(selected, restored_cases)

    new_report = _generate_runreport(runner.stats.json(), *tm.timestamps())
    assert new_report['runs'][0]['num_cases'] == 1
    assert new_report['runs'][0]['testcases'][0]['name'] == 'T1'

    # Remove the test case dump file and retry
    os.remove(tmp_path / 'stage' / 'generic' / 'default' / 'builtin' / 'T4' /
              '.rfm_testcase.json')

    with pytest.raises(ReframeError, match=r'could not restore testcase'):
        report.restore_dangling(testgraph)
Example #2
def test_valid_deps(make_test, default_exec_ctx):
    #
    #       t0       +-->t5<--+
    #       ^        |        |
    #       |        |        |
    #   +-->t1<--+   t6       t7
    #   |        |            ^
    #   t2<------t3           |
    #   ^        ^            |
    #   |        |            t8
    #   +---t4---+
    #
    t0 = make_test('t0')
    t1 = make_test('t1')
    t2 = make_test('t2')
    t3 = make_test('t3')
    t4 = make_test('t4')
    t5 = make_test('t5')
    t6 = make_test('t6')
    t7 = make_test('t7')
    t8 = make_test('t8')
    t1.depends_on('t0')
    t2.depends_on('t1')
    t3.depends_on('t1')
    t3.depends_on('t2')
    t4.depends_on('t2')
    t4.depends_on('t3')
    t6.depends_on('t5')
    t7.depends_on('t5')
    t8.depends_on('t7')
    dependencies.validate_deps(
        dependencies.build_deps(
            executors.generate_testcases([t0, t1, t2, t3, t4, t5, t6, t7,
                                          t8]))[0])
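
# `make_test` is a fixture of the surrounding test suite and is not shown in
# these examples. A minimal sketch of what it presumably provides (a factory
# for trivially runnable, uniquely named checks); the real fixture may differ:

import pytest

import reframe as rfm
import reframe.utility.sanity as sn


@pytest.fixture
def make_test():
    def _make_test(name):
        class _Test(rfm.RunOnlyRegressionTest):
            valid_systems = ['*']
            valid_prog_environs = ['*']
            executable = 'echo'
            sanity_patterns = sn.assert_true(1)

        test = _Test()
        test.name = name  # assumption: `name` is writable, as in Example #16
        return test

    return _make_test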
Example #3
def test_skip_unresolved_deps(make_test, temp_runtime):
    #
    #       t0    t4
    #      ^  ^   ^
    #     /    \ /
    #    t1    t2
    #           ^
    #           |
    #          t3
    #

    rt = temp_runtime(fixtures.TEST_CONFIG_FILE, system='sys0:p0')
    next(rt)

    t0 = make_test('t0')
    t0.valid_systems = ['sys0:p1']
    t1 = make_test('t1')
    t2 = make_test('t2')
    t3 = make_test('t3')
    t4 = make_test('t4')
    t1.depends_on('t0')
    t2.depends_on('t0')
    t2.depends_on('t4')
    t3.depends_on('t2')
    deps, skipped_cases = dependencies.build_deps(
        executors.generate_testcases([t0, t1, t2, t3, t4])
    )
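    # t0 is only valid on sys0:p1, so t1, t2 and t3, which depend on it
    # directly or transitively, cannot be resolved on sys0:p0; presumably
    # 3 tests x 2 programming environments = 6 skipped cases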
    assert len(skipped_cases) == 6

    skipped_tests = {c.check.name for c in skipped_cases}
    assert skipped_tests == {'t1', 't2', 't3'}
Example #4
def test_require_deps(HelloTest, local_exec_ctx):
    import reframe.frontend.dependencies as dependencies
    import reframe.frontend.executors as executors

    @test_util.custom_prefix('unittests/resources/checks')
    class T0(HelloTest):
        x = variable(int, value=1)

    @test_util.custom_prefix('unittests/resources/checks')
    class T1(HelloTest):
        @run_after('init')
        def setdeps(self):
            self.depends_on('T0')

        @require_deps
        def sety(self, T0):
            self.y = T0().x + 1

        @run_before('run')
        @require_deps
        def setz(self, T0):
            self.z = T0().x + 2

    cases = executors.generate_testcases([T0(), T1()])
    deps, _ = dependencies.build_deps(cases)
    for c in dependencies.toposort(deps):
        _run(*c)

    for c in cases:
        t = c.check
        if t.name == 'T0':
            assert t.x == 1
        elif t.name == 'T1':
            assert t.y == 2
            assert t.z == 3
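
# In the `require_deps` hooks above, each argument named after a dependency is
# bound to a callable that returns the corresponding target check instance,
# which is why `T0().x` reads `x` off the instance of `T0` that actually ran.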
Example #5
def test_skip_unresolved_deps(make_test, make_exec_ctx):
    #
    #       t0    t4
    #      ^  ^   ^
    #     /    \ /
    #    t1    t2
    #           ^
    #           |
    #          t3
    #

    make_exec_ctx(system='sys0:p0')

    t0 = make_test('t0')
    t0.valid_systems = ['sys0:p1']
    t1 = make_test('t1')
    t2 = make_test('t2')
    t3 = make_test('t3')
    t4 = make_test('t4')
    t1.depends_on('t0')
    t2.depends_on('t0')
    t2.depends_on('t4')
    t3.depends_on('t2')
    deps, skipped_cases = dependencies.build_deps(
        executors.generate_testcases([t0, t1, t2, t3, t4])
    )
    assert len(skipped_cases) == 6

    skipped_tests = {c.check.name for c in skipped_cases}
    assert skipped_tests == {'t1', 't2', 't3'}
Example #6
    def _make_cases():
        @fixtures.custom_prefix('unittests/resources/checks')
        class _T0(rfm.RegressionTest):
            valid_systems = ['*']
            valid_prog_environs = ['*']
            sourcepath = 'hello.c'
            executable = 'echo'
            sanity_patterns = sn.assert_true(1)

            def check_and_skip(self):
                self.skip_if(True)

            # Attach the hook manually based on the request.param
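            # (the class body executes at class definition time, so it can
            # read `request` from the enclosing fixture's scope)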
            when, stage = request.param.split('_', maxsplit=1)
            hook = rfm.run_before if when == 'pre' else rfm.run_after
            check_and_skip = hook(stage)(check_and_skip)

        class _T1(rfm.RunOnlyRegressionTest):
            valid_systems = ['*']
            valid_prog_environs = ['*']
            sanity_patterns = sn.assert_true(1)

            def __init__(self):
                self.depends_on(_T0.__qualname__)

        cases = executors.generate_testcases([_T0(), _T1()])
        depgraph, _ = dependencies.build_deps(cases)
        return dependencies.toposort(depgraph), request.param
Example #7
    def _make_cases(checks=None, sort=False, *args, **kwargs):
        if checks is None:
            checks = make_loader(['unittests/resources/checks']).load_all()

        cases = executors.generate_testcases(checks, *args, **kwargs)
        if sort:
            depgraph, _ = dependencies.build_deps(cases)
            dependencies.validate_deps(depgraph)
            cases = dependencies.toposort(depgraph)

        return cases
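
# Hypothetical call sites for the factory above, assuming it is exposed
# through a pytest fixture:
#
#   cases = _make_cases()                        # all checks, unsorted
#   cases = _make_cases(sort=True)               # topologically sorted
#   cases = _make_cases([MyCheck()], sort=True)  # explicit check list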
Example #8
def test_cyclic_deps_by_env(make_test, default_exec_ctx):
    t0 = make_test('t0')
    t1 = make_test('t1')
    t1.depends_on('t0', udeps.env_is('e0'))
    t0.depends_on('t1', udeps.env_is('e1'))
    deps, _ = dependencies.build_deps(executors.generate_testcases([t0, t1]))
    with pytest.raises(DependencyError) as exc_info:
        dependencies.validate_deps(deps)

    assert ('t1->t0->t1' in str(exc_info.value)
            or 't0->t1->t0' in str(exc_info.value))
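
# `udeps.env_is` above is not one of the documented `reframe.utility.udeps`
# helpers; a sketch of what such a 'when' predicate could look like, assuming
# the usual convention that `depends_on(target, when)` evaluates
# `when(src, dst)` on (partition, environment) pairs:

def env_is(name):
    def _when(src, dst):
        # hypothetical: keep the dependency edge only for environment `name`
        return src[1] == name and dst[1] == name

    return _when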
Example #9
def test_prune_deps(make_test, default_exec_ctx):
    #
    #       t0       +-->t5<--+
    #       ^        |        |
    #       |        |        |
    #   +-->t1<--+   t6       t7
    #   |        |            ^
    #   t2<------t3           |
    #   ^        ^            |
    #   |        |            t8
    #   +---t4---+
    #
    t0 = make_test('t0')
    t1 = make_test('t1')
    t2 = make_test('t2')
    t3 = make_test('t3')
    t4 = make_test('t4')
    t5 = make_test('t5')
    t6 = make_test('t6')
    t7 = make_test('t7')
    t8 = make_test('t8')
    t1.depends_on('t0')
    t2.depends_on('t1')
    t3.depends_on('t1')
    t3.depends_on('t2')
    t4.depends_on('t2')
    t4.depends_on('t3')
    t6.depends_on('t5')
    t7.depends_on('t5')
    t8.depends_on('t7')

    testcases_all = executors.generate_testcases([t0, t1, t2, t3, t4,
                                                  t5, t6, t7, t8])
    testcases = executors.generate_testcases([t3, t7])
    full_deps, _ = dependencies.build_deps(testcases_all)
    pruned_deps = dependencies.prune_deps(full_deps, testcases)

    # Check the connectivity
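    # 6 surviving nodes (t0, t1, t2, t3, t5, t7) x 4 case variants
    # (2 partitions x 2 environments)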
    assert len(pruned_deps) == 6*4
    for p in ['sys0:p0', 'sys0:p1']:
        for e in ['e0', 'e1']:
            node = functools.partial(Node, pname=p, ename=e)
            assert has_edge(pruned_deps, node('t3'), node('t2'))
            assert has_edge(pruned_deps, node('t3'), node('t1'))
            assert has_edge(pruned_deps, node('t2'), node('t1'))
            assert has_edge(pruned_deps, node('t1'), node('t0'))
            assert has_edge(pruned_deps, node('t7'), node('t5'))
            assert len(pruned_deps[node('t3')]) == 2
            assert len(pruned_deps[node('t2')]) == 1
            assert len(pruned_deps[node('t1')]) == 1
            assert len(pruned_deps[node('t7')]) == 1
            assert len(pruned_deps[node('t5')]) == 0
            assert len(pruned_deps[node('t0')]) == 0
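
# `Node` and `has_edge` are helpers of the surrounding test suite. A minimal
# sketch, assuming the dependency graph maps each case to the list of its
# direct dependencies and that `Node(cname, pname, ename)` builds a key that
# compares equal to the matching test case:

def has_edge(graph, src, dst):
    # An edge src -> dst means that `src` directly depends on `dst`
    return dst in graph[src]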
Example #10
def test_toposort_subgraph(make_test, default_exec_ctx):
    #
    #       t0
    #       ^
    #       |
    #   +-->t1<--+
    #   |        |
    #   t2<------t3
    #   ^        ^
    #   |        |
    #   +---t4---+
    #
    t0 = make_test('t0')
    t1 = make_test('t1')
    t2 = make_test('t2')
    t3 = make_test('t3')
    t4 = make_test('t4')
    t1.depends_on('t0')
    t2.depends_on('t1')
    t3.depends_on('t1')
    t3.depends_on('t2')
    t4.depends_on('t2')
    t4.depends_on('t3')
    full_deps, _ = dependencies.build_deps(
        executors.generate_testcases([t0, t1, t2, t3, t4])
    )
    partial_deps, _ = dependencies.build_deps(
        executors.generate_testcases([t3, t4]), full_deps
    )
    cases = dependencies.toposort(partial_deps, is_subgraph=True)
    assert_topological_order(cases, partial_deps)

    # Assert the level assignment
    cases_by_level = {}
    for c in cases:
        cases_by_level.setdefault(c.level, set())
        cases_by_level[c.level].add(c.check.name)

    assert cases_by_level[1] == {'t3'}
    assert cases_by_level[2] == {'t4'}
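
# `assert_topological_order` is another helper assumed by these examples; a
# sketch of the obvious check, namely that every case appears only after all
# of its dependencies that are part of the sorted sequence:

def assert_topological_order(cases, graph):
    emitted = set()
    case_set = set(cases)
    for c in cases:
        emitted.add(c)
        assert all(dep in emitted for dep in graph[c] if dep in case_set)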
Example #11
def test_restore_session(report_file, make_runner, dep_cases, common_exec_ctx,
                         tmp_path):
    # Select a single test to run and create the pruned graph
    selected = [tc for tc in dep_cases if tc.check.name == 'T1']
    testgraph = dependencies.prune_deps(dependencies.build_deps(dep_cases)[0],
                                        selected,
                                        max_depth=1)

    # Restore the required test cases
    report = runreport.load_report(report_file)
    testgraph, restored_cases = report.restore_dangling(testgraph)

    assert {tc.check.name for tc in restored_cases} == {'T4', 'T5'}

    # Run the selected test cases
    runner = make_runner()
    with timer() as tm:
        runner.runall(selected, restored_cases)

    new_report = _generate_runreport(runner.stats.json(), *tm.timestamps())
    assert new_report['runs'][0]['num_cases'] == 1
    assert new_report['runs'][0]['testcases'][0]['name'] == 'T1'

    # Generate an empty report and load it as primary with the original report
    # as a fallback, in order to test if the dependencies are still resolved
    # correctly
    empty_report = tmp_path / 'empty.json'

    with open(empty_report, 'w') as fp:
        empty_run = [{
            'num_cases': 0,
            'num_failures': 0,
            'num_aborted': 0,
            'num_skipped': 0,
            'runid': 0,
            'testcases': []
        }]
        jsonext.dump(_generate_runreport(empty_run, *tm.timestamps()), fp)

    report2 = runreport.load_report(empty_report, report_file)
    restored_cases = report2.restore_dangling(testgraph)[1]
    assert {tc.check.name for tc in restored_cases} == {'T4', 'T5'}

    # Remove the test case dump file and retry
    os.remove(tmp_path / 'stage' / 'generic' / 'default' / 'builtin' / 'T4' /
              '.rfm_testcase.json')

    with pytest.raises(ReframeError, match=r'could not restore testcase'):
        report.restore_dangling(testgraph)
Example #12
def test_toposort(make_test, default_exec_ctx):
    #
    #       t0       +-->t5<--+
    #       ^        |        |
    #       |        |        |
    #   +-->t1<--+   t6       t7
    #   |        |            ^
    #   t2<------t3           |
    #   ^        ^            |
    #   |        |            t8
    #   +---t4---+
    #
    t0 = make_test('t0')
    t1 = make_test('t1')
    t2 = make_test('t2')
    t3 = make_test('t3')
    t4 = make_test('t4')
    t5 = make_test('t5')
    t6 = make_test('t6')
    t7 = make_test('t7')
    t8 = make_test('t8')
    t1.depends_on('t0')
    t2.depends_on('t1')
    t3.depends_on('t1')
    t3.depends_on('t2')
    t4.depends_on('t2')
    t4.depends_on('t3')
    t6.depends_on('t5')
    t7.depends_on('t5')
    t8.depends_on('t7')
    deps, _ = dependencies.build_deps(
        executors.generate_testcases([t0, t1, t2, t3, t4,
                                      t5, t6, t7, t8])
    )
    cases = dependencies.toposort(deps)
    assert_topological_order(cases, deps)

    # Assert the level assignment
    cases_by_level = {}
    for c in cases:
        cases_by_level.setdefault(c.level, set())
        cases_by_level[c.level].add(c.check.name)

    assert cases_by_level[0] == {'t0', 't5'}
    assert cases_by_level[1] == {'t1', 't6', 't7'}
    assert cases_by_level[2] == {'t2', 't8'}
    assert cases_by_level[3] == {'t3'}
    assert cases_by_level[4] == {'t4'}
Example #13
def test_ci_gitlab_pipeline():
    loader = RegressionCheckLoader(
        ['unittests/resources/checks_unlisted/deps_complex.py'])
    cases = dependencies.toposort(
        dependencies.build_deps(executors.generate_testcases(
            loader.load_all()))[0])
    with io.StringIO() as fp:
        ci.emit_pipeline(fp, cases)
        pipeline = fp.getvalue()

    # Fetch the latest Gitlab CI JSON schema
    response = requests.get('https://json.schemastore.org/gitlab-ci')
    assert response.ok

    schema = response.json()
    jsonschema.validate(yaml.safe_load(pipeline), schema)
Example #14
    def _retry_failed(self, cases):
        rt = runtime.runtime()
        failures = self._stats.failed()
        while failures and rt.current_run < self._max_retries:
            num_failed_checks = len({tc.check.name for tc in failures})
            rt.next_run()

            self._printer.separator(
                'short double line',
                'Retrying %d failed check(s) (retry %d/%d)' %
                (num_failed_checks, rt.current_run, self._max_retries))

            # Clone failed cases and rebuild dependencies among them
            failed_cases = [t.testcase.clone() for t in failures]
            cases_graph, _ = dependencies.build_deps(failed_cases, cases)
            failed_cases = dependencies.toposort(cases_graph, is_subgraph=True)
            self._runall(failed_cases)
            failures = self._stats.failed()
Example #15
def test_cyclic_deps(make_test, default_exec_ctx):
    #
    #       t0       +-->t5<--+
    #       ^        |        |
    #       |        |        |
    #   +-->t1<--+   t6       t7
    #   |   |    |            ^
    #   t2  |    t3           |
    #   ^   |    ^            |
    #   |   v    |            t8
    #   +---t4---+
    #
    t0 = make_test('t0')
    t1 = make_test('t1')
    t2 = make_test('t2')
    t3 = make_test('t3')
    t4 = make_test('t4')
    t5 = make_test('t5')
    t6 = make_test('t6')
    t7 = make_test('t7')
    t8 = make_test('t8')
    t1.depends_on('t0')
    t1.depends_on('t4')
    t2.depends_on('t1')
    t3.depends_on('t1')
    t4.depends_on('t2')
    t4.depends_on('t3')
    t6.depends_on('t5')
    t7.depends_on('t5')
    t8.depends_on('t7')
    deps, _ = dependencies.build_deps(
        executors.generate_testcases([t0, t1, t2, t3, t4,
                                      t5, t6, t7, t8])
    )

    with pytest.raises(DependencyError) as exc_info:
        dependencies.validate_deps(deps)

    assert ('t4->t2->t1->t4' in str(exc_info.value) or
            't2->t1->t4->t2' in str(exc_info.value) or
            't1->t4->t2->t1' in str(exc_info.value) or
            't1->t4->t3->t1' in str(exc_info.value) or
            't4->t3->t1->t4' in str(exc_info.value) or
            't3->t1->t4->t3' in str(exc_info.value))
Example #16
def test_require_deps(HelloTest, local_exec_ctx):
    import reframe.frontend.dependencies as dependencies
    import reframe.frontend.executors as executors

    @test_util.custom_prefix('unittests/resources/checks')
    class T0(HelloTest):
        def __init__(self):
            super().__init__()
            self.name = type(self).__name__
            self.executable = os.path.join('.', self.name)
            self.x = 1

    @test_util.custom_prefix('unittests/resources/checks')
    class T1(HelloTest):
        def __init__(self):
            super().__init__()
            self.name = type(self).__name__
            self.executable = os.path.join('.', self.name)
            self.depends_on('T0')

        @require_deps
        def sety(self, T0):
            self.y = T0().x + 1

        @run_before('run')
        @require_deps
        def setz(self, T0):
            self.z = T0().x + 2

    cases = executors.generate_testcases([T0(), T1()])
    deps, _ = dependencies.build_deps(cases)
    for c in dependencies.toposort(deps):
        _run(*c)

    for c in cases:
        t = c.check
        if t.name == 'T0':
            assert t.x == 1
        elif t.name == 'T1':
            assert t.y == 2
            assert t.z == 3
Example #17
def test_ci_gitlab_pipeline():
    loader = RegressionCheckLoader(
        ['unittests/resources/checks_unlisted/deps_complex.py'])
    cases = dependencies.toposort(
        dependencies.build_deps(executors.generate_testcases(
            loader.load_all()))[0])
    with io.StringIO() as fp:
        ci.emit_pipeline(fp, cases)
        pipeline = fp.getvalue()

    # Fetch the latest Gitlab CI JSON schema
    try:
        response = requests.get(
            'https://gitlab.com/gitlab-org/gitlab/-/raw/master/app/assets/javascripts/editor/schema/ci.json'  # noqa: E501
        )
    except requests.exceptions.ConnectionError as e:
        pytest.skip(f'could not reach URL: {e}')
    else:
        assert response.ok

    schema = response.json()
    jsonschema.validate(yaml.safe_load(pipeline), schema)
Example #18
def main():
    # Setup command line options
    argparser = argparse.ArgumentParser()
    output_options = argparser.add_argument_group(
        'Options controlling ReFrame output')
    locate_options = argparser.add_argument_group(
        'Options for discovering checks')
    select_options = argparser.add_argument_group(
        'Options for selecting checks')
    action_options = argparser.add_argument_group(
        'Options controlling actions')
    run_options = argparser.add_argument_group(
        'Options controlling the execution of checks')
    env_options = argparser.add_argument_group(
        'Options controlling the ReFrame environment')
    misc_options = argparser.add_argument_group('Miscellaneous options')

    # Output directory options
    output_options.add_argument('--dont-restage',
                                action='store_false',
                                dest='clean_stagedir',
                                help='Reuse the test stage directory',
                                envvar='RFM_CLEAN_STAGEDIR',
                                configvar='general/clean_stagedir')
    output_options.add_argument(
        '--keep-stage-files',
        action='store_true',
        help='Keep stage directories even for successful checks',
        envvar='RFM_KEEP_STAGE_FILES',
        configvar='general/keep_stage_files')
    output_options.add_argument('-o',
                                '--output',
                                action='store',
                                metavar='DIR',
                                help='Set output directory prefix to DIR',
                                envvar='RFM_OUTPUT_DIR',
                                configvar='systems/outputdir')
    output_options.add_argument(
        '--perflogdir',
        action='store',
        metavar='DIR',
        help=('Set performance log data directory prefix '
              '(relevant only to the filelog log handler)'),
        envvar='RFM_PERFLOG_DIR',
        configvar='logging/handlers_perflog/filelog_basedir')
    output_options.add_argument('--prefix',
                                action='store',
                                metavar='DIR',
                                help='Set general directory prefix to DIR',
                                envvar='RFM_PREFIX',
                                configvar='systems/prefix')
    output_options.add_argument('--report-file',
                                action='store',
                                metavar='FILE',
                                help="Store JSON run report in FILE",
                                envvar='RFM_REPORT_FILE',
                                configvar='general/report_file')
    output_options.add_argument('--report-junit',
                                action='store',
                                metavar='FILE',
                                help="Store a JUnit report in FILE",
                                envvar='RFM_REPORT_JUNIT',
                                configvar='general/report_junit')
    output_options.add_argument('-s',
                                '--stage',
                                action='store',
                                metavar='DIR',
                                help='Set stage directory prefix to DIR',
                                envvar='RFM_STAGE_DIR',
                                configvar='systems/stagedir')
    output_options.add_argument(
        '--save-log-files',
        action='store_true',
        default=False,
        help='Save ReFrame log files to the output directory',
        envvar='RFM_SAVE_LOG_FILES',
        configvar='general/save_log_files')
    output_options.add_argument(
        '--timestamp',
        action='store',
        nargs='?',
        const='%FT%T',
        metavar='TIMEFMT',
        help=('Append a timestamp to the output and stage directory prefixes '
              '(default: "%%FT%%T")'),
        envvar='RFM_TIMESTAMP_DIRS',
        configvar='general/timestamp_dirs')

    # Check discovery options
    locate_options.add_argument('-c',
                                '--checkpath',
                                action='append',
                                metavar='PATH',
                                help="Add PATH to the check search path list",
                                envvar='RFM_CHECK_SEARCH_PATH :',
                                configvar='general/check_search_path')
    locate_options.add_argument(
        '--ignore-check-conflicts',
        action='store_true',
        help=('Skip checks with conflicting names '
              '(this option is deprecated and has no effect)'),
        envvar='RFM_IGNORE_CHECK_CONFLICTS',
        configvar='general/ignore_check_conflicts')
    locate_options.add_argument(
        '-R',
        '--recursive',
        action='store_true',
        help='Search for checks in the search path recursively',
        envvar='RFM_CHECK_SEARCH_RECURSIVE',
        configvar='general/check_search_recursive')

    # Select options
    select_options.add_argument('--cpu-only',
                                action='store_true',
                                help='Select only CPU checks')
    select_options.add_argument(
        '--failed',
        action='store_true',
        help="Select failed test cases (only when '--restore-session' is used)"
    )
    select_options.add_argument('--gpu-only',
                                action='store_true',
                                help='Select only GPU checks')
    select_options.add_argument(
        '--maintainer',
        action='append',
        dest='maintainers',
        default=[],
        metavar='PATTERN',
        help='Select checks with at least one maintainer matching PATTERN')
    select_options.add_argument(
        '-n',
        '--name',
        action='append',
        dest='names',
        default=[],
        metavar='PATTERN',
        help='Select checks whose name matches PATTERN')

    # FIXME: The following is the only selection option that has an associated
    # (undocumented) configuration variable. This is to support pruning of the
    # partition environments as the runtime is created, similarly to how the
    # system partitions are treated. Currently, this facilitates the
    # implementation of fixtures, but we should reconsider it: see discussion
    # in https://github.com/eth-cscs/reframe/issues/2245
    select_options.add_argument(
        '-p',
        '--prgenv',
        action='append',
        default=[r'.*'],
        metavar='PATTERN',
        configvar='general/valid_env_names',
        help=('Select checks with at least one '
              'programming environment matching PATTERN'))
    select_options.add_argument(
        '-T',
        '--exclude-tag',
        action='append',
        dest='exclude_tags',
        metavar='PATTERN',
        default=[],
        help='Exclude checks whose tag matches PATTERN')
    select_options.add_argument(
        '-t',
        '--tag',
        action='append',
        dest='tags',
        metavar='PATTERN',
        default=[],
        help='Select checks with at least one tag matching PATTERN')
    select_options.add_argument(
        '-x',
        '--exclude',
        action='append',
        dest='exclude_names',
        metavar='PATTERN',
        default=[],
        help='Exclude checks whose name matches PATTERN')

    # Action options
    action_options.add_argument(
        '--ci-generate',
        action='store',
        metavar='FILE',
        help=('Generate into FILE a Gitlab CI pipeline '
              'for the selected tests and exit'),
    )
    action_options.add_argument(
        '-L',
        '--list-detailed',
        action='store_true',
        help='List the selected checks providing details for each test')
    action_options.add_argument('-l',
                                '--list',
                                action='store_true',
                                help='List the selected checks')
    action_options.add_argument(
        '--list-tags',
        action='store_true',
        help='List the unique tags found in the selected tests and exit')
    action_options.add_argument('-r',
                                '--run',
                                action='store_true',
                                help='Run the selected checks')

    # Run options
    run_options.add_argument('--disable-hook',
                             action='append',
                             metavar='NAME',
                             dest='hooks',
                             default=[],
                             help='Disable a pipeline hook for this run')
    run_options.add_argument(
        '--exec-policy',
        metavar='POLICY',
        action='store',
        choices=['async', 'serial'],
        default='async',
        help='Set the execution policy of ReFrame (default: "async")')
    run_options.add_argument(
        '--flex-alloc-nodes',
        action='store',
        dest='flex_alloc_nodes',
        metavar='{all|STATE|NUM}',
        default=None,
        help='Set strategy for the flexible node allocation (default: "idle").'
    )
    run_options.add_argument('--force-local',
                             action='store_true',
                             help='Force local execution of checks')
    run_options.add_argument('-J',
                             '--job-option',
                             action='append',
                             metavar='OPT',
                             dest='job_options',
                             default=[],
                             help='Pass option OPT to job scheduler')
    run_options.add_argument(
        '--max-retries',
        metavar='NUM',
        action='store',
        default=0,
        help='Set the maximum number of times a failed regression test '
        'may be retried (default: 0)')
    run_options.add_argument('--maxfail',
                             metavar='NUM',
                             action='store',
                             default=sys.maxsize,
                             help='Exit after first NUM failures')
    run_options.add_argument('--mode',
                             action='store',
                             help='Execution mode to use')
    run_options.add_argument('--restore-session',
                             action='store',
                             nargs='?',
                             const='',
                             metavar='REPORT',
                             help='Restore a testing session from REPORT file')
    run_options.add_argument('-S',
                             '--setvar',
                             action='append',
                             metavar='[TEST.]VAR=VAL',
                             dest='vars',
                             default=[],
                             help=('Set test variable VAR to VAL in all tests '
                                   'or optionally in TEST only'))
    run_options.add_argument('--skip-performance-check',
                             action='store_true',
                             help='Skip performance checking')
    run_options.add_argument('--skip-prgenv-check',
                             action='store_true',
                             help='Skip programming environment check')
    run_options.add_argument('--skip-sanity-check',
                             action='store_true',
                             help='Skip sanity checking')
    run_options.add_argument('--skip-system-check',
                             action='store_true',
                             help='Skip system check')
    run_options.add_argument('--strict',
                             action='store_true',
                             help='Enforce strict performance checking')

    # Environment options
    env_options.add_argument('-M',
                             '--map-module',
                             action='append',
                             metavar='MAPPING',
                             dest='module_mappings',
                             default=[],
                             help='Add a module mapping',
                             envvar='RFM_MODULE_MAPPINGS ,',
                             configvar='general/module_mappings')
    env_options.add_argument(
        '-m',
        '--module',
        action='append',
        default=[],
        metavar='MOD',
        dest='user_modules',
        help='Load module MOD before running any regression check',
        envvar='RFM_USER_MODULES ,',
        configvar='general/user_modules')
    env_options.add_argument('--module-mappings',
                             action='store',
                             metavar='FILE',
                             dest='module_map_file',
                             help='Load module mappings from FILE',
                             envvar='RFM_MODULE_MAP_FILE',
                             configvar='general/module_map_file')
    env_options.add_argument(
        '--module-path',
        action='append',
        metavar='PATH',
        dest='module_paths',
        default=[],
        help='(Un)use module path PATH before running any regression check',
    )
    env_options.add_argument(
        '--non-default-craype',
        action='store_true',
        help='Test a non-default Cray Programming Environment',
        envvar='RFM_NON_DEFAULT_CRAYPE',
        configvar='general/non_default_craype')
    env_options.add_argument(
        '--purge-env',
        action='store_true',
        dest='purge_env',
        default=False,
        help='Unload all modules before running any regression check',
        envvar='RFM_PURGE_ENVIRONMENT',
        configvar='general/purge_environment')
    env_options.add_argument(
        '-u',
        '--unload-module',
        action='append',
        metavar='MOD',
        dest='unload_modules',
        default=[],
        help='Unload module MOD before running any regression check',
        envvar='RFM_UNLOAD_MODULES ,',
        configvar='general/unload_modules')

    # Miscellaneous options
    misc_options.add_argument('-C',
                              '--config-file',
                              action='store',
                              dest='config_file',
                              metavar='FILE',
                              help='Set configuration file',
                              envvar='RFM_CONFIG_FILE')
    misc_options.add_argument('--detect-host-topology',
                              action='store',
                              nargs='?',
                              const='-',
                              help='Detect the local host topology and exit')
    misc_options.add_argument('--failure-stats',
                              action='store_true',
                              help='Print failure statistics')
    misc_options.add_argument('--nocolor',
                              action='store_false',
                              dest='colorize',
                              help='Disable coloring of output',
                              envvar='RFM_COLORIZE',
                              configvar='general/colorize')
    misc_options.add_argument('--performance-report',
                              action='store_true',
                              help='Print a report for performance tests')
    misc_options.add_argument(
        '--show-config',
        action='store',
        nargs='?',
        const='all',
        metavar='PARAM',
        help='Print the value of configuration parameter PARAM and exit')
    misc_options.add_argument('--system',
                              action='store',
                              help='Load configuration for SYSTEM',
                              envvar='RFM_SYSTEM')
    misc_options.add_argument(
        '--upgrade-config-file',
        action='store',
        metavar='OLD[:NEW]',
        help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax')
    misc_options.add_argument('-V',
                              '--version',
                              action='version',
                              version=osext.reframe_version())
    misc_options.add_argument('-v',
                              '--verbose',
                              action='count',
                              help='Increase verbosity level of output',
                              envvar='RFM_VERBOSE',
                              configvar='general/verbose')
    misc_options.add_argument(
        '-q',
        '--quiet',
        action='count',
        default=0,
        help='Decrease verbosity level of output',
    )

    # Options not associated with command-line arguments
    argparser.add_argument(
        dest='git_timeout',
        envvar='RFM_GIT_TIMEOUT',
        configvar='general/git_timeout',
        help=('Timeout in seconds when checking if the URL is a '
              'valid repository'))
    argparser.add_argument(
        dest='graylog_server',
        envvar='RFM_GRAYLOG_ADDRESS',
        configvar='logging/handlers_perflog/graylog_address',
        help='Graylog server address')
    argparser.add_argument(dest='httpjson_url',
                           envvar='RFM_HTTPJSON_URL',
                           configvar='logging/handlers_perflog/httpjson_url',
                           help='URL of HTTP server accepting JSON logs')
    argparser.add_argument(dest='ignore_reqnodenotavail',
                           envvar='RFM_IGNORE_REQNODENOTAVAIL',
                           configvar='schedulers/ignore_reqnodenotavail',
                           action='store_true',
                           help='Ignore the ReqNodeNotAvail Slurm state')
    argparser.add_argument(dest='compact_test_names',
                           envvar='RFM_COMPACT_TEST_NAMES',
                           configvar='general/compact_test_names',
                           action='store_true',
                           help='Use a compact test naming scheme')
    argparser.add_argument(dest='remote_detect',
                           envvar='RFM_REMOTE_DETECT',
                           configvar='general/remote_detect',
                           action='store_true',
                           help='Detect remote system topology')
    argparser.add_argument(
        dest='remote_workdir',
        envvar='RFM_REMOTE_WORKDIR',
        configvar='general/remote_workdir',
        action='store',
        help='Working directory for launching ReFrame remotely')
    argparser.add_argument(dest='resolve_module_conflicts',
                           envvar='RFM_RESOLVE_MODULE_CONFLICTS',
                           configvar='general/resolve_module_conflicts',
                           action='store_true',
                           help='Resolve module conflicts automatically')
    argparser.add_argument(dest='syslog_address',
                           envvar='RFM_SYSLOG_ADDRESS',
                           configvar='logging/handlers_perflog/syslog_address',
                           help='Syslog server address')
    argparser.add_argument(
        dest='trap_job_errors',
        envvar='RFM_TRAP_JOB_ERRORS',
        configvar='general/trap_job_errors',
        action='store_true',
        help='Trap job errors in job scripts and fail tests automatically')
    argparser.add_argument(dest='use_login_shell',
                           envvar='RFM_USE_LOGIN_SHELL',
                           configvar='general/use_login_shell',
                           action='store_true',
                           help='Use a login shell for job scripts')

    # Parse command line
    options = argparser.parse_args()
    if len(sys.argv) == 1:
        argparser.print_help()
        sys.exit(1)

    # First configure logging with our generic configuration so as to be able
    # to print pretty messages; logging will be reconfigured by user's
    # configuration later
    site_config = config.load_config(
        os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py'))
    site_config.select_subconfig('generic')
    options.update_config(site_config)
    logging.configure_logging(site_config)
    logging.getlogger().colorize = site_config.get('general/0/colorize')
    printer = PrettyPrinter()
    printer.colorize = site_config.get('general/0/colorize')
    printer.adjust_verbosity(calc_verbosity(site_config, options.quiet))
    if os.getenv('RFM_GRAYLOG_SERVER'):
        printer.warning(
            'RFM_GRAYLOG_SERVER environment variable is deprecated; '
            'please use RFM_GRAYLOG_ADDRESS instead')
        os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')

    if options.upgrade_config_file is not None:
        old_config, *new_config = options.upgrade_config_file.split(':',
                                                                    maxsplit=1)
        new_config = new_config[0] if new_config else None

        try:
            new_config = config.convert_old_config(old_config, new_config)
        except Exception as e:
            printer.error(f'could not convert file: {e}')
            sys.exit(1)

        printer.info(f'Conversion successful! '
                     f'The converted file can be found at {new_config!r}.')
        sys.exit(0)

    # Now configure ReFrame according to the user configuration file
    try:
        try:
            printer.debug('Loading user configuration')
            site_config = config.load_config(options.config_file)
        except warnings.ReframeDeprecationWarning as e:
            printer.warning(e)
            converted = config.convert_old_config(options.config_file)
            printer.warning(f"configuration file has been converted "
                            f"to the new syntax here: '{converted}'")
            site_config = config.load_config(converted)

        site_config.validate()

        # We ignore errors about unresolved sections or configuration
        # parameters here, because they might be defined at the individual
        # partition level and will be caught later, when the system and its
        # partitions are instantiated internally.
        site_config.select_subconfig(options.system,
                                     ignore_resolve_errors=True)
        for err in options.update_config(site_config):
            printer.warning(str(err))

        # Update options from the selected execution mode
        if options.mode:
            mode_args = site_config.get(f'modes/@{options.mode}/options')

            # We lexically split the mode options, because otherwise spaces
            # will be treated as part of the option argument; see GH bug #1554
            mode_args = list(
                itertools.chain.from_iterable(
                    shlex.split(m) for m in mode_args))
            # Parse the mode's options and reparse the command-line
            options = argparser.parse_args(mode_args)
            options = argparser.parse_args(namespace=options.cmd_options)
            options.update_config(site_config)

        logging.configure_logging(site_config)
    except (OSError, errors.ConfigError) as e:
        printer.error(f'failed to load configuration: {e}')
        printer.error(logfiles_message())
        sys.exit(1)

    logging.getlogger().colorize = site_config.get('general/0/colorize')
    printer.colorize = site_config.get('general/0/colorize')
    printer.adjust_verbosity(calc_verbosity(site_config, options.quiet))
    try:
        printer.debug('Initializing runtime')
        runtime.init_runtime(site_config)
    except errors.ConfigError as e:
        printer.error(f'failed to initialize runtime: {e}')
        printer.error(logfiles_message())
        sys.exit(1)

    if site_config.get('general/0/ignore_check_conflicts'):
        logging.getlogger().warning(
            "the 'ignore_check_conflicts' option is deprecated "
            "and will be removed in the future")

    rt = runtime.runtime()
    try:
        if site_config.get('general/0/module_map_file'):
            rt.modules_system.load_mapping_from_file(
                site_config.get('general/0/module_map_file'))

        if site_config.get('general/0/module_mappings'):
            for m in site_config.get('general/0/module_mappings'):
                rt.modules_system.load_mapping(m)

    except (errors.ConfigError, OSError) as e:
        printer.error('could not load module mappings: %s' % e)
        sys.exit(1)

    if (osext.samefile(rt.stage_prefix, rt.output_prefix)
            and not site_config.get('general/0/keep_stage_files')):
        printer.error("stage and output refer to the same directory; "
                      "if this is on purpose, please use the "
                      "'--keep-stage-files' option.")
        printer.error(logfiles_message())
        sys.exit(1)

    # Show configuration after everything is set up
    if options.show_config:
        config_param = options.show_config
        if config_param == 'all':
            printer.info(str(rt.site_config))
        else:
            value = rt.get_option(config_param)
            if value is None:
                printer.error(
                    f'no such configuration parameter found: {config_param}')
            else:
                printer.info(json.dumps(value, indent=2))

        sys.exit(0)

    if options.detect_host_topology:
        from reframe.utility.cpuinfo import cpuinfo

        topofile = options.detect_host_topology
        if topofile == '-':
            json.dump(cpuinfo(), sys.stdout, indent=2)
            sys.stdout.write('\n')
        else:
            try:
                with open(topofile, 'w') as fp:
                    json.dump(cpuinfo(), fp, indent=2)
                    fp.write('\n')
            except OSError as e:
                getlogger().error(
                    f'could not write topology file {topofile!r}: {e}')
                sys.exit(1)

        sys.exit(0)

    autodetect.detect_topology()
    printer.debug(format_env(options.env_vars))

    # Setup the check loader
    if options.restore_session is not None:
        # We need to load the failed checks only from a list of reports
        if options.restore_session:
            filenames = options.restore_session.split(',')
        else:
            filenames = [
                runreport.next_report_filename(osext.expandvars(
                    site_config.get('general/0/report_file')),
                                               new=False)
            ]

        report = runreport.load_report(*filenames)
        check_search_path = list(report.slice('filename', unique=True))
        check_search_recursive = False

        # If `-c` or `-R` are passed explicitly outside the configuration
        # file, override the values set from the report file
        if site_config.is_sticky_option('general/check_search_path'):
            printer.warning(
                'Ignoring check search path set in the report file: '
                'search path set explicitly in the command-line or '
                'the environment')
            check_search_path = site_config.get('general/0/check_search_path')

        if site_config.is_sticky_option('general/check_search_recursive'):
            printer.warning(
                'Ignoring check search recursive option from the report file: '
                'option set explicitly in the command-line or the environment')
            check_search_recursive = site_config.get(
                'general/0/check_search_recursive')

    else:
        check_search_recursive = site_config.get(
            'general/0/check_search_recursive')
        check_search_path = site_config.get('general/0/check_search_path')

    # Collect any variables set from the command line
    external_vars = {}
    for expr in options.vars:
        try:
            lhs, rhs = expr.split('=', maxsplit=1)
        except ValueError:
            printer.warning(
                f'invalid test variable assignment: {expr!r}; skipping')
        else:
            external_vars[lhs] = rhs

    loader = RegressionCheckLoader(check_search_path, check_search_recursive,
                                   external_vars)

    def print_infoline(param, value):
        param = param + ':'
        printer.info(f"  {param.ljust(18)} {value}")

    session_info = {
        'cmdline': ' '.join(sys.argv),
        'config_file': rt.site_config.filename,
        'data_version': runreport.DATA_VERSION,
        'hostname': socket.getfqdn(),
        'prefix_output': rt.output_prefix,
        'prefix_stage': rt.stage_prefix,
        'user': osext.osuser(),
        'version': osext.reframe_version(),
        'workdir': os.getcwd(),
    }

    # Print command line
    printer.info(f"[ReFrame Setup]")
    print_infoline('version', session_info['version'])
    print_infoline('command', repr(session_info['cmdline']))
    print_infoline(
        f"launched by",
        f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}")
    print_infoline('working directory', repr(session_info['workdir']))
    print_infoline('settings file', f"{session_info['config_file']!r}")
    print_infoline(
        'check search path', f"{'(R) ' if loader.recurse else ''}"
        f"{':'.join(loader.load_path)!r}")
    print_infoline('stage directory', repr(session_info['prefix_stage']))
    print_infoline('output directory', repr(session_info['prefix_output']))
    printer.info('')
    try:
        # Locate and load checks
        checks_found = loader.load_all()
        printer.verbose(f'Loaded {len(checks_found)} test(s)')

        # Generate all possible test cases first; we will need them for
        # resolving dependencies after filtering
        testcases_all = generate_testcases(checks_found,
                                           options.skip_system_check,
                                           options.skip_prgenv_check)
        testcases = testcases_all
        printer.verbose(f'Generated {len(testcases)} test case(s)')

        # Filter test cases by name
        if options.exclude_names:
            for name in options.exclude_names:
                testcases = filter(filters.have_not_name(name), testcases)

        if options.names:
            testcases = filter(filters.have_name('|'.join(options.names)),
                               testcases)

        testcases = list(testcases)
        printer.verbose(
            f'Filtering test case(s) by name: {len(testcases)} remaining')

        # Filter test cases by tags
        for tag in options.exclude_tags:
            testcases = filter(filters.have_not_tag(tag), testcases)

        for tag in options.tags:
            testcases = filter(filters.have_tag(tag), testcases)

        testcases = list(testcases)
        printer.verbose(
            f'Filtering test case(s) by tags: {len(testcases)} remaining')

        # Filter test cases by maintainers
        for maint in options.maintainers:
            testcases = filter(filters.have_maintainer(maint), testcases)

        # Filter test cases further
        if options.gpu_only and options.cpu_only:
            printer.error("options `--gpu-only' and `--cpu-only' "
                          "are mutually exclusive")
            sys.exit(1)

        if options.gpu_only:
            testcases = filter(filters.have_gpu_only(), testcases)
        elif options.cpu_only:
            testcases = filter(filters.have_cpu_only(), testcases)

        testcases = list(testcases)
        printer.verbose(f'Filtering test case(s) by other attributes: '
                        f'{len(testcases)} remaining')

        # Filter in failed cases
        if options.failed:
            if options.restore_session is None:
                printer.error(
                    "the option '--failed' can only be used "
                    "in combination with the '--restore-session' option")
                sys.exit(1)

            def _case_failed(t):
                rec = report.case(*t)
                if not rec:
                    return False

                return (rec['result'] == 'failure'
                        or rec['result'] == 'aborted')

            testcases = list(filter(_case_failed, testcases))
            printer.verbose(f'Filtering successful test case(s): '
                            f'{len(testcases)} remaining')

        # Prepare for running
        printer.debug('Building and validating the full test DAG')
        testgraph, skipped_cases = dependencies.build_deps(testcases_all)
        if skipped_cases:
            # Some cases were skipped, so adjust testcases
            testcases = list(set(testcases) - set(skipped_cases))
            printer.verbose(
                f'Filtering test case(s) due to unresolved dependencies: '
                f'{len(testcases)} remaining')

        dependencies.validate_deps(testgraph)
        printer.debug('Full test DAG:')
        printer.debug(dependencies.format_deps(testgraph))

        restored_cases = []
        if len(testcases) != len(testcases_all):
            testgraph = dependencies.prune_deps(
                testgraph,
                testcases,
                max_depth=1 if options.restore_session is not None else None)
            printer.debug('Pruned test DAG')
            printer.debug(dependencies.format_deps(testgraph))
            if options.restore_session is not None:
                testgraph, restored_cases = report.restore_dangling(testgraph)

        testcases = dependencies.toposort(testgraph,
                                          is_subgraph=options.restore_session
                                          is not None)
        printer.verbose(f'Final number of test cases: {len(testcases)}')

        # Disable hooks
        for tc in testcases:
            for h in options.hooks:
                tc.check.disable_hook(h)

        # Act on checks
        if options.list or options.list_detailed:
            list_checks(testcases, printer, options.list_detailed)
            sys.exit(0)

        if options.list_tags:
            list_tags(testcases, printer)
            sys.exit(0)

        if options.ci_generate:
            list_checks(testcases, printer)
            printer.info('[Generate CI]')
            with open(options.ci_generate, 'wt') as fp:
                ci.emit_pipeline(fp, testcases)

            printer.info(f'  Gitlab pipeline generated successfully '
                         f'in {options.ci_generate!r}.\n')
            sys.exit(0)

        if not options.run:
            printer.error("No action option specified. Available options:\n"
                          "  - `-l'/`-L' for listing\n"
                          "  - `-r' for running\n"
                          "  - `--list-tags' for listing unique test tags\n"
                          "  - `--ci-generate' for generating a CI pipeline\n"
                          f"Try `{argparser.prog} -h' for more options.")
            sys.exit(1)

        # Manipulate ReFrame's environment
        if site_config.get('general/0/purge_environment'):
            rt.modules_system.unload_all()
        else:
            for m in site_config.get('general/0/unload_modules'):
                rt.modules_system.unload_module(**m)

        # Load the environment for the current system
        try:
            printer.debug('Loading environment for current system')
            runtime.loadenv(rt.system.preload_environ)
        except errors.EnvironError as e:
            printer.error("failed to load current system's environment; "
                          "please check your configuration")
            printer.debug(str(e))
            raise

        def module_use(*paths):
            try:
                rt.modules_system.searchpath_add(*paths)
            except errors.EnvironError as e:
                printer.warning('could not add module paths correctly')
                printer.debug(str(e))

        def module_unuse(*paths):
            try:
                rt.modules_system.searchpath_remove(*paths)
            except errors.EnvironError as e:
                printer.warning('could not remove module paths correctly')
                printer.debug(str(e))

        printer.debug('(Un)using module paths from command line')
        module_paths = {}
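        # Group the requested paths by operation: a '+' prefix means
        # `module use`, a '-' prefix means `module unuse` and no prefix
        # ('x') replaces the module search path altogether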
        for d in options.module_paths:
            if d.startswith('-'):
                module_paths.setdefault('-', [])
                module_paths['-'].append(d[1:])
            elif d.startswith('+'):
                module_paths.setdefault('+', [])
                module_paths['+'].append(d[1:])
            else:
                module_paths.setdefault('x', [])
                module_paths['x'].append(d)

        for op, paths in module_paths.items():
            if op == '+':
                module_use(*paths)
            elif op == '-':
                module_unuse(*paths)
            else:
                # First empty the current module path in a portable way
                searchpath = [p for p in rt.modules_system.searchpath if p]
                if searchpath:
                    rt.modules_system.searchpath_remove(*searchpath)

                # Treat `A:B` syntax as well in this case
                paths = itertools.chain(*(p.split(':') for p in paths))
                module_use(*paths)

        printer.debug('Loading user modules from command line')
        for m in site_config.get('general/0/user_modules'):
            try:
                rt.modules_system.load_module(**m, force=True)
            except errors.EnvironError as e:
                printer.warning(
                    f'could not load module {m["name"]!r} correctly; '
                    f'skipping...')
                printer.debug(str(e))

        options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'

        # Run the tests

        # Setup the execution policy
        if options.exec_policy == 'serial':
            exec_policy = SerialExecutionPolicy()
        elif options.exec_policy == 'async':
            exec_policy = AsynchronousExecutionPolicy()
        else:
            # This should not happen, since choices are handled by
            # argparser
            printer.error("unknown execution policy `%s': Exiting...")
            sys.exit(1)

        exec_policy.skip_system_check = options.skip_system_check
        exec_policy.force_local = options.force_local
        exec_policy.strict_check = options.strict
        exec_policy.skip_sanity_check = options.skip_sanity_check
        exec_policy.skip_performance_check = options.skip_performance_check
        exec_policy.keep_stage_files = site_config.get(
            'general/0/keep_stage_files')
        try:
            errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
            sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
            if sched_flex_alloc_nodes <= 0:
                raise errors.ConfigError(
                    errmsg.format(options.flex_alloc_nodes))
        except ValueError:
            sched_flex_alloc_nodes = options.flex_alloc_nodes
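
        # Non-integer values (e.g. the default 'idle') are kept as strings,
        # presumably to be interpreted by the scheduler backend; only
        # explicit non-positive integers are rejected above.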

        exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
        parsed_job_options = []
        for opt in options.job_options:
            opt_split = opt.split('=', maxsplit=1)
            optstr = opt_split[0]
            valstr = opt_split[1] if len(opt_split) > 1 else ''
            if opt.startswith('-') or opt.startswith('#'):
                parsed_job_options.append(opt)
            elif len(optstr) == 1:
                parsed_job_options.append(f'-{optstr} {valstr}')
            else:
                parsed_job_options.append(f'--{optstr} {valstr}')
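
        # Examples (hypothetical values): 'p=debug' becomes '-p debug'
        # (single-letter option), 'partition=debug' becomes
        # '--partition debug', while options already starting with '-' or
        # '#' (e.g. '#SBATCH --exclusive') are passed through unchanged.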

        exec_policy.sched_options = parsed_job_options
        try:
            max_retries = int(options.max_retries)
        except ValueError:
            raise errors.ConfigError(
                f'--max-retries is not a valid integer: '
                f'{options.max_retries!r}'
            ) from None

        try:
            max_failures = int(options.maxfail)
            if max_failures < 0:
                raise errors.ConfigError(
                    f'--maxfail should be a non-negative integer: '
                    f'{options.maxfail!r}')
        except ValueError:
            raise errors.ConfigError(
                f'--maxfail is not a valid integer: {options.maxfail!r}'
            ) from None

        runner = Runner(exec_policy, printer, max_retries, max_failures)
        try:
            time_start = time.time()
            session_info['time_start'] = time.strftime(
                '%FT%T%z',
                time.localtime(time_start),
            )
            runner.runall(testcases, restored_cases)
        finally:
            time_end = time.time()
            session_info['time_end'] = time.strftime('%FT%T%z',
                                                     time.localtime(time_end))
            session_info['time_elapsed'] = time_end - time_start

            # Print a retry report if we did any retries
            if runner.stats.failed(run=0):
                printer.info(runner.stats.retry_report())

            # Print a failure report if we had failures in the last run
            success = True
            if runner.stats.failed():
                success = False
                runner.stats.print_failure_report(printer)
                if options.failure_stats:
                    runner.stats.print_failure_stats(printer)

            if options.performance_report:
                printer.info(runner.stats.performance_report())

            # Generate the report for this session
            report_file = os.path.normpath(
                osext.expandvars(rt.get_option('general/0/report_file')))
            basedir = os.path.dirname(report_file)
            if basedir:
                os.makedirs(basedir, exist_ok=True)

            # Build final JSON report
            run_stats = runner.stats.json()
            session_info.update({
                'num_cases': run_stats[0]['num_cases'],
                'num_failures': run_stats[-1]['num_failures']
            })
            json_report = {
                'session_info': session_info,
                'runs': run_stats,
                'restored_cases': []
            }
            if options.restore_session is not None:
                for c in restored_cases:
                    json_report['restored_cases'].append(report.case(*c))

            report_file = runreport.next_report_filename(report_file)
            try:
                with open(report_file, 'w') as fp:
                    jsonext.dump(json_report, fp, indent=2)
                    fp.write('\n')

                printer.info(f'Run report saved in {report_file!r}')
            except OSError as e:
                printer.warning(
                    f'failed to generate report in {report_file!r}: {e}')

            # Generate the junit xml report for this session
            junit_report_file = rt.get_option('general/0/report_junit')
            if junit_report_file:
                # Expand variables in filename
                junit_report_file = osext.expandvars(junit_report_file)
                junit_xml = runreport.junit_xml_report(json_report)
                try:
                    with open(junit_report_file, 'w') as fp:
                        runreport.junit_dump(junit_xml, fp)
                except OSError as e:
                    printer.warning(
                        f'failed to generate report in {junit_report_file!r}: '
                        f'{e}')

        if not success:
            sys.exit(1)

        sys.exit(0)
    except (Exception, KeyboardInterrupt, errors.ReframeFatalError):
        exc_info = sys.exc_info()
        tb = ''.join(traceback.format_exception(*exc_info))
        printer.error(f'run session stopped: {errors.what(*exc_info)}')
        if errors.is_exit_request(*exc_info):
            # Print stack traces for exit requests only when TOO verbose
            printer.debug2(tb)
        elif errors.is_severe(*exc_info):
            printer.error(tb)
        else:
            printer.verbose(tb)

        sys.exit(1)
    finally:
        try:
            log_files = logging.log_files()
            if site_config.get('general/0/save_log_files'):
                log_files = logging.save_log_files(rt.output_prefix)

        except OSError as e:
            printer.error(f'could not save log file: {e}')
            sys.exit(1)
        finally:
            printer.info(logfiles_message())
Example #19
def test_build_deps(loader, default_exec_ctx):
    checks = loader.load_all(force=True)

    # We need to prepare the test cases as if we were about to run them,
    # because we want to test `getdep()` as well, which normally gets resolved
    # during the `setup` phase of the pipeline
    cases = executors.generate_testcases(checks, prepare=True)

    # Test calling getdep() before having built the graph
    t = find_check('Test1_fully', checks)
    with pytest.raises(DependencyError):
        t.getdep('Test0', 'e0', 'p0')

    # Build dependencies and continue testing
    deps, _ = dependencies.build_deps(cases)
    dependencies.validate_deps(deps)

    # Check dependencies for fully connected graph
    assert num_deps(deps, 'Test1_fully') == 16
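    # (2 partitions x 2 environments = 4 source cases, each depending on
    # all 4 cases of Test0: 4 x 4 = 16 edges)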
    for p0 in ['sys0:p0', 'sys0:p1']:
        for p1 in ['sys0:p0', 'sys0:p1']:
            for e0 in ['e0', 'e1']:
                for e1 in ['e0', 'e1']:
                    assert has_edge(deps,
                                    Node('Test1_fully', p0, e0),
                                    Node('Test0', p1, e1))

    # Check dependencies with same partition
    assert num_deps(deps, 'Test1_by_part') == 8
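    # (partitions must match: for each of the 2 partitions there are
    # 2 source x 2 target environments = 4 edges, 8 in total)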
    for p in ['sys0:p0', 'sys0:p1']:
        for e0 in ['e0', 'e1']:
            for e1 in ['e0', 'e1']:
                assert has_edge(deps,
                                Node('Test1_by_part', p, e0),
                                Node('Test0', p, e1))

    # Check dependencies with same partition environment
    assert num_deps(deps, 'Test1_by_case') == 4
    assert num_deps(deps, 'Test1_default') == 4
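    # (each source case depends only on its same-partition,
    # same-environment counterpart: 2 partitions x 2 environments = 4 edges)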
    for p in ['sys0:p0', 'sys0:p1']:
        for e in ['e0', 'e1']:
            assert has_edge(deps,
                            Node('Test1_by_case', p, e),
                            Node('Test0', p, e))
            assert has_edge(deps,
                            Node('Test1_default', p, e),
                            Node('Test0', p, e))

    assert num_deps(deps, 'Test1_any') == 12
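    # (of the 16 possible combinations, only those with p0 == 'sys0:p1'
    # and e1 == 'e0' are excluded -- 4 of them -- leaving 12 edges)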
    for p0 in ['sys0:p0', 'sys0:p1']:
        for p1 in ['sys0:p0', 'sys0:p1']:
            for e0 in ['e0', 'e1']:
                for e1 in ['e0', 'e1']:
                    if (p0 == 'sys0:p0' or e1 == 'e1'):
                        assert has_edge(deps,
                                        Node('Test1_any', p0, e0),
                                        Node('Test0', p1, e1))

    assert num_deps(deps, 'Test1_all') == 2
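    # (p0, p1 and e1 are all pinned by the condition below; only e0 is
    # free, giving 2 edges)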
    for p0 in ['sys0:p0', 'sys0:p1']:
        for p1 in ['sys0:p0', 'sys0:p1']:
            for e0 in ['e0', 'e1']:
                for e1 in ['e0', 'e1']:
                    if (p0 == 'sys0:p0' and p1 == 'sys0:p0' and e1 == 'e1'):
                        assert has_edge(deps,
                                        Node('Test1_all', p0, e0),
                                        Node('Test0', p1, e1))

    # Check custom dependencies
    assert num_deps(deps, 'Test1_custom') == 1
    assert has_edge(deps,
                    Node('Test1_custom', 'sys0:p0', 'e0'),
                    Node('Test0', 'sys0:p1', 'e1'))

    # Check dependencies of Test1_nodeps
    assert num_deps(deps, 'Test1_nodeps') == 0

    # Check in-degree of Test0

    # 4 from Test1_fully,
    # 2 from Test1_by_part,
    # 1 from Test1_by_case,
    # 2 from Test1_any,
    # 2 from Test1_all,
    # 0 from Test1_custom,
    # 1 from Test1_default
    # 0 from Test1_nodeps
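    # Total: 4 + 2 + 1 + 2 + 2 + 0 + 1 + 0 = 12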
    assert in_degree(deps, Node('Test0', 'sys0:p0', 'e0')) == 12

    # 4 from Test1_fully,
    # 2 from Test1_by_part,
    # 1 from Test1_by_case,
    # 2 from Test1_any,
    # 0 from Test1_all,
    # 0 from Test1_custom,
    # 1 from Test1_default
    # 0 from Test1_nodeps
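    # Total: 4 + 2 + 1 + 2 + 0 + 0 + 1 + 0 = 10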
    assert in_degree(deps, Node('Test0', 'sys0:p1', 'e0')) == 10

    # 4 from Test1_fully,
    # 2 from Test1_by_part,
    # 1 from Test1_by_case,
    # 4 from Test1_any,
    # 0 from Test1_all,
    # 0 from Test1_custom,
    # 1 from Test1_default
    # 0 from Test1_nodeps
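    # Total: 4 + 2 + 1 + 4 + 0 + 0 + 1 + 0 = 12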
    assert in_degree(deps, Node('Test0', 'sys0:p0', 'e1')) == 12

    # 4 from Test1_fully,
    # 2 from Test1_by_part,
    # 1 from Test1_by_case,
    # 4 from Test1_any,
    # 0 from Test1_all,
    # 1 from Test1_custom,
    # 1 from Test1_default
    # 0 from Test1_nodeps
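    # Total: 4 + 2 + 1 + 4 + 0 + 1 + 1 + 0 = 13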
    assert in_degree(deps, Node('Test0', 'sys0:p1', 'e1')) == 13

    # Pick a check to test getdep()
    check_e0 = find_case('Test1_by_part', 'e0', 'p0', cases).check
    check_e1 = find_case('Test1_by_part', 'e1', 'p0', cases).check

    with pytest.raises(DependencyError):
        check_e0.getdep('Test0', 'p0')

    # Set the current environment
    check_e0._current_environ = Environment('e0')
    check_e1._current_environ = Environment('e1')

    assert check_e0.getdep('Test0', 'e0', 'p0').name == 'Test0'
    assert check_e0.getdep('Test0', 'e1', 'p0').name == 'Test0'
    assert check_e1.getdep('Test0', 'e1', 'p0').name == 'Test0'
    with pytest.raises(DependencyError):
        check_e0.getdep('TestX_deprecated', 'e0', 'p0')

    with pytest.raises(DependencyError):
        check_e0.getdep('Test0', 'eX', 'p0')

    with pytest.raises(DependencyError):
        check_e1.getdep('Test0', 'e0', 'p1')
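
# A minimal sketch (an assumption, not part of the original module) of
# graph helpers like those used above. It assumes `deps` is an adjacency
# mapping from each test case to the list of cases it depends on, and
# that `Node` (not sketched here) builds the matching test-case key from
# a check name, a partition and an environment:

def sketch_has_edge(graph, src, dst):
    # An edge exists iff `dst` appears in the adjacency list of `src`
    return dst in graph[src]


def sketch_num_deps(graph, cname):
    # Count the outgoing edges of every case belonging to check `cname`
    return sum(len(adj) for case, adj in graph.items()
               if case.check.name == cname)


def sketch_in_degree(graph, node):
    # Count how many cases depend directly on `node`
    return sum(node in adj for adj in graph.values())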
Example #20
def test_build_deps_empty(default_exec_ctx):
    assert {} == dependencies.build_deps([])[0]