Example 1
def test_heterogeneous_arguments_are_detected(lang: str, tmp_path: Path,
                                              pytestconfig):
    conf = configuration(pytestconfig, "isbn", lang, tmp_path, "full.tson",
                         "solution")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 0
    assert updates.find_status_enum() == ["internal error"]
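All the examples share the same harness: build a judge configuration, execute it, and query the stream of updates it emitted. As a hedged illustration of what queries such as find_all and find_status_enum could look like, here is a minimal sketch that assumes each update is a plain dict with a "command" key (the real structure may differ):

class UpdateLog:
    """Minimal stand-in for the object returned by assert_valid_output."""

    def __init__(self, updates):
        self.updates = updates

    def find_all(self, command):
        # All updates of the given command type, in emission order.
        return [u for u in self.updates if u.get("command") == command]

    def find_status_enum(self):
        # The status enum of every status-carrying update.
        return [u["status"]["enum"] for u in self.updates
                if isinstance(u.get("status"), dict) and "enum" in u["status"]]

log = UpdateLog([
    {"command": "start-testcase"},
    {"command": "close-test", "status": {"enum": "correct"}},
])
assert log.find_all("start-testcase") == [{"command": "start-testcase"}]
assert log.find_status_enum() == ["correct"]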
Example 2
def test_too_much_output(tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "echo", "python", tmp_path, "two.tson",
                         "output_limit", {"output_limit": 1000})
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    # Four statuses: two for the tests, one for the escalate update and one
    # for the judgement.
    assert updates.find_status_enum() == ["output limit exceeded"] * 4
    assert len(updates.find_all("close-test")) == 2
Example 3
def test_programmed_evaluator_wrong(lang: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "lotto", lang, tmp_path,
                         "one-programmed-python.tson", "wrong")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 1
    assert updates.find_status_enum() == ["wrong"]
    assert len(updates.find_all("append-message")) == 1
Example 4
def test_javascript_async(exercise: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, exercise, "javascript", tmp_path,
                         "one.tson", "correct-async")
    workdir = Path(conf.resources).parent / "workdir"
    if workdir.exists():
        shutil.copytree(workdir, tmp_path, dirs_exist_ok=True)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"]
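The conditional workdir copy above recurs in several exercises: some ship a workdir directory next to their resources that must be merged into the temporary working directory. A hedged helper capturing the pattern (copy_workdir_if_present is a name invented here, not part of the suite); note that dirs_exist_ok requires Python 3.8+:

import shutil
from pathlib import Path

def copy_workdir_if_present(resources: Path, dest: Path) -> None:
    # Merge the exercise's optional "workdir" into the working directory.
    workdir = resources.parent / "workdir"
    if workdir.exists():
        shutil.copytree(workdir, dest, dirs_exist_ok=True)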
Example 5
def test_hide_expected_wrong(language: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "echo", language, tmp_path,
                         "one-hide-expected.tson", "wrong")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["wrong"]
    hidden = [t for t in updates.find_all("start-test") if not t["expected"]]
    assert len(hidden) == 1
Example 6
def test_io_function_file_exercise(language: str, tmp_path: Path,
                                   pytestconfig):
    conf = configuration(pytestconfig, "echo-function-file", language,
                         tmp_path, "one.tson", "correct")
    shutil.copytree(Path(conf.resources).parent / "workdir",
                    tmp_path,
                    dirs_exist_ok=True)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"]
Example 7
def test_program_params(lang: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "sum", lang, tmp_path, "short.tson",
                         "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == [
        'correct', 'correct', 'correct', 'correct'
    ]
    assert len(updates.find_all("start-testcase")) == 3
    assert len(updates.find_all("start-test")) == 4
Example 8
def test_javascript_exception_missing_message(tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig,
                         "js-exceptions",
                         "javascript",
                         tmp_path,
                         "plan.yaml",
                         "wrong-message",
                         backward_compatibility_plan=True)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["wrong"]
    assert len(updates.find_all("append-message")) == 1
Example 9
def test_crashing_assignment_with_before(lang: str, tmp_path: Path,
                                         pytestconfig):
    conf = configuration(pytestconfig, "isbn", lang, tmp_path,
                         f"one-with-crashing-assignment-{lang}.tson",
                         "solution")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    # Only the assignment was started.
    assert len(updates.find_all("start-testcase")) == 1
    assert updates.find_status_enum() == ["wrong"]
    # Assert the exception is included.
    assert updates.find_next("start-test")["channel"] == "exception"
Example 10
def test_batch_compilation_no_fallback(language: str, tmp_path: Path,
                                       pytestconfig):
    config_ = {"options": {"allow_fallback": False}}
    conf = configuration(pytestconfig, "echo", language, tmp_path, "two.tson",
                         "run-error", config_)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 2
    # One wrong status for the stdout and stderr channel of each testcase.
    assert len(updates.find_status_enum()) >= 4
    # There could be more wrongs: some languages might modify the exit code
    assert all(s == "wrong" for s in updates.find_status_enum())
Example 11
def test_assignment_and_use_in_expression_list(lang: str, tmp_path: Path,
                                               pytestconfig):
    conf = configuration(pytestconfig, "isbn-list", lang, tmp_path,
                         "one-with-assignment.tson", "solution")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    # Assert that the empty context testcase is not shown, while the assignment
    # and expression testcases are shown.
    assert len(updates.find_all("start-testcase")) == 2
    # Assert that only one test was executed.
    assert updates.find_status_enum() == ["correct"]
    assert len(updates.find_all("start-test")) == 1
Example 12
def test_full_echo(lang: str, tmp_path: Path, pytestconfig):
    config_ = {"options": {"parallel": True}}
    conf = configuration(pytestconfig,
                         "echo",
                         lang,
                         tmp_path,
                         "full.tson",
                         "correct",
                         options=config_)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 50
    assert updates.find_status_enum() == ["correct"] * 50
Example 13
def test_batch_compilation_fallback(language: str, tmp_path: Path,
                                    pytestconfig, mocker):
    lang_class = LANGUAGES[language]
    class_instance = lang_class()
    mocker.patch.object(lang_class,
                        'compilation',
                        wraps=class_instance.compilation)
    conf = configuration(pytestconfig, "echo", language, tmp_path, "two.tson",
                         "comp-error")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 2
    assert updates.find_status_enum() == ["compilation error"] * 2
    assert class_instance.compilation.call_count == 3
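The mocker.patch.object(..., wraps=...) idiom above turns the real compilation method into a spy: every call is recorded on the mock while still being delegated to the original implementation. A self-contained sketch of the same idiom, using a hypothetical Compiler class and plain unittest.mock:

from unittest import mock

class Compiler:
    def compilation(self, files):
        return list(files)

instance = Compiler()
original = instance.compilation  # bind the real method before patching
with mock.patch.object(Compiler, "compilation", wraps=original):
    instance.compilation(["a.py"])
    instance.compilation(["b.py"])
    # The class attribute is now the mock, so the count is visible on it.
    assert instance.compilation.call_count == 2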
Example 14
def test_context_compilation(language: str, tmp_path: Path, pytestconfig,
                             mocker):
    config_ = {"options": {"mode": "context"}}
    # Mock the compilation callback to ensure we call it for every context.
    lang_class = LANGUAGES[language]
    class_instance = lang_class()
    mocker.patch.object(lang_class,
                        'compilation',
                        wraps=class_instance.compilation)
    conf = configuration(pytestconfig, "echo", language, tmp_path, "two.tson",
                         "correct", config_)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 2
    assert updates.find_status_enum() == ["correct"] * 2
    assert class_instance.compilation.call_count == 2
Example 15
def test_io_function_display_multiline_exercise(language: str, tmp_path: Path,
                                                pytestconfig):
    conf = configuration(pytestconfig, "echo-function", language, tmp_path,
                         "one-display-multiline.tson", "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"]
    start_test = updates.find_all('start-test')
    close_test = updates.find_all('close-test')
    assert 1 == len(start_test)
    assert 1 == len(close_test)
    assert "return (String)" == start_test[0].get("channel", '')
    expected, actual = start_test[0].get("expected", ''), close_test[0].get(
        "generated", '')
    quote = quotes[language]
    assert expected[0] != quote and expected[-1] != quote
    assert actual[0] != quote and actual[-1] != quote
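Here quotes is assumed to be a mapping from language to the quote character its display format uses; the last two assertions check that multiline values are rendered without surrounding quotes. A hedged helper expressing the same check (is_unquoted is invented for illustration):

def is_unquoted(value: str, quote: str) -> bool:
    # True when the value is non-empty and not wrapped in the quote character.
    return bool(value) and value[0] != quote and value[-1] != quote

assert is_unquoted("line1\nline2", '"')
assert not is_unquoted('"line1\nline2"', '"')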
Example 16
def test_timeout(language_and_time: Tuple[str, int], tmp_path: Path,
                 pytestconfig):
    config_ = {
        "time_limit": language_and_time[1]  # seconds
    }
    conf = configuration(pytestconfig, "echo", language_and_time[0], tmp_path,
                         "full.tson", "correct", config_)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("start-testcase")) == 50
    status = updates.find_status_enum()
    correct = [x for x in status if x == "correct"]
    exceeded = [x for x in status if x == "time limit exceeded"]
    wrong = [x for x in status if x == "wrong"]
    # We should have at least one good result.
    assert len(correct) >= 1
    assert len(wrong) <= 2
    assert len(exceeded) >= 1
    # One status per testcase, plus one escalation and one judgement-close.
    assert len(wrong + correct + exceeded) == 50 + 1 + 1
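The same bookkeeping (one status per testcase, plus the escalation and the judgement-close) could be phrased with collections.Counter; a hedged alternative formulation, not the suite's convention:

from collections import Counter

# Hypothetical status stream: 50 testcase statuses + escalation + judgement.
status = ["correct"] * 47 + ["wrong"] * 2 + ["time limit exceeded"] * 3
counts = Counter(status)
assert counts["correct"] >= 1
assert counts["wrong"] <= 2
assert counts["time limit exceeded"] >= 1
assert sum(counts.values()) == 50 + 1 + 1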
Example 17
def test_cppcheck(tmp_path: Path, config, pytestconfig):
    conf = configuration(pytestconfig, "echo-function", "c", tmp_path,
                         "one.tson", "correct-cppcheck", config)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("annotate-code")) > 0
Example 18
def test_language_evaluator_exception(lang: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "division", lang, tmp_path, "plan.json",
                         "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"]
Example 19
def test_io_function_exercise(language: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "echo-function", language, tmp_path,
                         "one.tson", "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"]
Example 20
def test_simple_programmed_eval(language: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "echo", language, tmp_path,
                         "one-programmed-correct.tson", "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"]
Example 21
def test_named_parameters_supported(tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "echo-function", "python", tmp_path,
                         "one-named.tson", "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"]
Example 22
def test_objects_error(language: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "objects", language, tmp_path,
                         "plan.tson", "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["internal error"]
Example 23
def test_pylint(tmp_path: Path, config, pytestconfig):
    conf = configuration(pytestconfig, "counter", "python", tmp_path,
                         "plan.yaml", "solution-pylint", config)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("annotate-code")) > 0
Example 24
def test_shellcheck(tmp_path: Path, config, pytestconfig):
    conf = configuration(pytestconfig, "echo", "bash", tmp_path, "one.tson",
                         "wrong", config)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("annotate-code")) > 0
Example 25
def test_hlint(language: str, config, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "echo-function", language, tmp_path,
                         "one.tson", "correct_io", config)
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert len(updates.find_all("annotate-code")) > 0
Example 26
def test_global_variable_yaml(language: str, tmp_path: Path, pytestconfig):
    conf = configuration(pytestconfig, "global", language, tmp_path,
                         "plan.yaml", "correct")
    result = execute_config(conf)
    updates = assert_valid_output(result, pytestconfig)
    assert updates.find_status_enum() == ["correct"]